提交 9712ba15 编写于 作者: K kvn

6934604: enable parts of EliminateAutoBox by default

Summary: Resurrected autobox elimination code and enabled part of it by default.
Reviewed-by: roland, twisti
上级 66cf4a21
...@@ -211,12 +211,41 @@ bool ciInstanceKlass::is_java_lang_Object() const { ...@@ -211,12 +211,41 @@ bool ciInstanceKlass::is_java_lang_Object() const {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciInstanceKlass::uses_default_loader // ciInstanceKlass::uses_default_loader
bool ciInstanceKlass::uses_default_loader() { bool ciInstanceKlass::uses_default_loader() const {
// Note: We do not need to resolve the handle or enter the VM // Note: We do not need to resolve the handle or enter the VM
// in order to test null-ness. // in order to test null-ness.
return _loader == NULL; return _loader == NULL;
} }
// ------------------------------------------------------------------
/**
 * Return the basic type of the boxed value if this klass is one of the
 * well-known box classes, or T_OBJECT otherwise.
 */
BasicType ciInstanceKlass::box_klass_type() const {
  // Only fully loaded klasses defined by the boot (default) class loader
  // can be the java.lang box classes; anything else is a plain object.
  if (!is_loaded() || !uses_default_loader()) {
    return T_OBJECT;
  }
  return SystemDictionary::box_klass_type(get_Klass());
}
/**
* Is this boxing klass?
*/
bool ciInstanceKlass::is_box_klass() const {
return is_java_primitive(box_klass_type());
}
/**
* Is this boxed value offset?
*/
bool ciInstanceKlass::is_boxed_value_offset(int offset) const {
BasicType bt = box_klass_type();
return is_java_primitive(bt) &&
(offset == java_lang_boxing_object::value_offset_in_bytes(bt));
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciInstanceKlass::is_in_package // ciInstanceKlass::is_in_package
// //
......
...@@ -217,10 +217,14 @@ public: ...@@ -217,10 +217,14 @@ public:
ciInstanceKlass* implementor(); ciInstanceKlass* implementor();
// Is the defining class loader of this class the default loader? // Is the defining class loader of this class the default loader?
bool uses_default_loader(); bool uses_default_loader() const;
bool is_java_lang_Object() const; bool is_java_lang_Object() const;
BasicType box_klass_type() const;
bool is_box_klass() const;
bool is_boxed_value_offset(int offset) const;
// Is this klass in the given package? // Is this klass in the given package?
bool is_in_package(const char* packagename) { bool is_in_package(const char* packagename) {
return is_in_package(packagename, (int) strlen(packagename)); return is_in_package(packagename, (int) strlen(packagename));
......
...@@ -1179,6 +1179,44 @@ bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs); ...@@ -1179,6 +1179,44 @@ bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs);
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); } bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); } bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }
/**
 * Is this method one of the box-class valueOf() factory intrinsics
 * (Boolean.valueOf ... Double.valueOf)?
 */
bool ciMethod::is_boxing_method() const {
  // Only methods declared on a box klass can qualify.
  if (!holder()->is_box_klass()) {
    return false;
  }
  switch (intrinsic_id()) {
    case vmIntrinsics::_Boolean_valueOf:
    case vmIntrinsics::_Byte_valueOf:
    case vmIntrinsics::_Character_valueOf:
    case vmIntrinsics::_Short_valueOf:
    case vmIntrinsics::_Integer_valueOf:
    case vmIntrinsics::_Long_valueOf:
    case vmIntrinsics::_Float_valueOf:
    case vmIntrinsics::_Double_valueOf:
      return true;
    default:
      return false;
  }
}
/**
 * Is this method one of the box-class xxxValue() accessor intrinsics
 * (booleanValue ... doubleValue)?
 */
bool ciMethod::is_unboxing_method() const {
  // Only methods declared on a box klass can qualify.
  if (!holder()->is_box_klass()) {
    return false;
  }
  switch (intrinsic_id()) {
    case vmIntrinsics::_booleanValue:
    case vmIntrinsics::_byteValue:
    case vmIntrinsics::_charValue:
    case vmIntrinsics::_shortValue:
    case vmIntrinsics::_intValue:
    case vmIntrinsics::_longValue:
    case vmIntrinsics::_floatValue:
    case vmIntrinsics::_doubleValue:
      return true;
    default:
      return false;
  }
}
BCEscapeAnalyzer *ciMethod::get_bcea() { BCEscapeAnalyzer *ciMethod::get_bcea() {
#ifdef COMPILER2 #ifdef COMPILER2
if (_bcea == NULL) { if (_bcea == NULL) {
......
...@@ -298,6 +298,8 @@ class ciMethod : public ciMetadata { ...@@ -298,6 +298,8 @@ class ciMethod : public ciMetadata {
bool is_initializer () const; bool is_initializer () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; } bool can_be_statically_bound() const { return _can_be_statically_bound; }
void dump_replay_data(outputStream* st); void dump_replay_data(outputStream* st);
bool is_boxing_method() const;
bool is_unboxing_method() const;
// Print the bytecodes of this method. // Print the bytecodes of this method.
void print_codes_on(outputStream* st); void print_codes_on(outputStream* st);
......
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
template(java_lang_Float, "java/lang/Float") \ template(java_lang_Float, "java/lang/Float") \
template(java_lang_Double, "java/lang/Double") \ template(java_lang_Double, "java/lang/Double") \
template(java_lang_Byte, "java/lang/Byte") \ template(java_lang_Byte, "java/lang/Byte") \
template(java_lang_Byte_Cache, "java/lang/Byte$ByteCache") \ template(java_lang_Byte_ByteCache, "java/lang/Byte$ByteCache") \
template(java_lang_Short, "java/lang/Short") \ template(java_lang_Short, "java/lang/Short") \
template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \ template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \
template(java_lang_Integer, "java/lang/Integer") \ template(java_lang_Integer, "java/lang/Integer") \
......
...@@ -1854,8 +1854,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { ...@@ -1854,8 +1854,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp
tty->print("%4d ", compile_id); // print compilation number tty->print("%4d ", compile_id); // print compilation number
tty->print("%s ", (is_osr ? "%" : " ")); tty->print("%s ", (is_osr ? "%" : " "));
int code_size = (task->code() == NULL) ? 0 : task->code()->total_size(); if (task->code() != NULL) {
tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes()); tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size());
}
tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
} }
if (PrintCodeCacheOnCompilation) if (PrintCodeCacheOnCompilation)
......
...@@ -97,6 +97,11 @@ static bool is_init_with_ea(ciMethod* callee_method, ...@@ -97,6 +97,11 @@ static bool is_init_with_ea(ciMethod* callee_method,
); );
} }
// Should the callee be treated as an unboxing accessor that must be
// inlined?  Only relevant when boxing elimination is enabled for this
// compilation.
static bool is_unboxing_method(ciMethod* callee_method, Compile* C) {
  if (!C->eliminate_boxing()) {
    return false;
  }
  return callee_method->is_unboxing_method();
}
// positive filter: should callee be inlined? // positive filter: should callee be inlined?
bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, ciCallProfile& profile, int caller_bci, ciCallProfile& profile,
...@@ -144,6 +149,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, ...@@ -144,6 +149,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// bump the max size if the call is frequent // bump the max size if the call is frequent
if ((freq >= InlineFrequencyRatio) || if ((freq >= InlineFrequencyRatio) ||
(call_site_count >= InlineFrequencyCount) || (call_site_count >= InlineFrequencyCount) ||
is_unboxing_method(callee_method, C) ||
is_init_with_ea(callee_method, caller_method, C)) { is_init_with_ea(callee_method, caller_method, C)) {
max_inline_size = C->freq_inline_size(); max_inline_size = C->freq_inline_size();
...@@ -237,8 +243,25 @@ bool InlineTree::should_not_inline(ciMethod *callee_method, ...@@ -237,8 +243,25 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
return false; return false;
} }
if (callee_method->should_not_inline()) {
set_msg("disallowed by CompilerOracle");
return true;
}
#ifndef PRODUCT
if (ciReplay::should_not_inline(callee_method)) {
set_msg("disallowed by ciReplay");
return true;
}
#endif
// Now perform checks which are heuristic // Now perform checks which are heuristic
if (is_unboxing_method(callee_method, C)) {
// Inline unboxing methods.
return false;
}
if (!callee_method->force_inline()) { if (!callee_method->force_inline()) {
if (callee_method->has_compiled_code() && if (callee_method->has_compiled_code() &&
callee_method->instructions_size() > InlineSmallCode) { callee_method->instructions_size() > InlineSmallCode) {
...@@ -260,18 +283,6 @@ bool InlineTree::should_not_inline(ciMethod *callee_method, ...@@ -260,18 +283,6 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
} }
} }
if (callee_method->should_not_inline()) {
set_msg("disallowed by CompilerOracle");
return true;
}
#ifndef PRODUCT
if (ciReplay::should_not_inline(callee_method)) {
set_msg("disallowed by ciReplay");
return true;
}
#endif
if (UseStringCache) { if (UseStringCache) {
// Do not inline StringCache::profile() method used only at the beginning. // Do not inline StringCache::profile() method used only at the beginning.
if (callee_method->name() == ciSymbol::profile_name() && if (callee_method->name() == ciSymbol::profile_name() &&
...@@ -296,9 +307,8 @@ bool InlineTree::should_not_inline(ciMethod *callee_method, ...@@ -296,9 +307,8 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
} }
if (is_init_with_ea(callee_method, caller_method, C)) { if (is_init_with_ea(callee_method, caller_method, C)) {
// Escape Analysis: inline all executed constructors // Escape Analysis: inline all executed constructors
return false;
} else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
CompileThreshold >> 1))) { CompileThreshold >> 1))) {
set_msg("executed < MinInliningThreshold times"); set_msg("executed < MinInliningThreshold times");
......
...@@ -442,12 +442,15 @@ ...@@ -442,12 +442,15 @@
notproduct(bool, PrintEliminateLocks, false, \ notproduct(bool, PrintEliminateLocks, false, \
"Print out when locks are eliminated") \ "Print out when locks are eliminated") \
\ \
diagnostic(bool, EliminateAutoBox, false, \ product(bool, EliminateAutoBox, true, \
"Private flag to control optimizations for autobox elimination") \ "Control optimizations for autobox elimination") \
\ \
product(intx, AutoBoxCacheMax, 128, \ product(intx, AutoBoxCacheMax, 128, \
"Sets max value cached by the java.lang.Integer autobox cache") \ "Sets max value cached by the java.lang.Integer autobox cache") \
\ \
experimental(bool, AggressiveUnboxing, false, \
"Control optimizations for aggressive boxing elimination") \
\
product(bool, DoEscapeAnalysis, true, \ product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \ "Perform escape analysis") \
\ \
......
...@@ -125,9 +125,10 @@ void C2Compiler::compile_method(ciEnv* env, ...@@ -125,9 +125,10 @@ void C2Compiler::compile_method(ciEnv* env,
bool subsume_loads = SubsumeLoads; bool subsume_loads = SubsumeLoads;
bool do_escape_analysis = DoEscapeAnalysis && bool do_escape_analysis = DoEscapeAnalysis &&
!env->jvmti_can_access_local_variables(); !env->jvmti_can_access_local_variables();
bool eliminate_boxing = EliminateAutoBox;
while (!env->failing()) { while (!env->failing()) {
// Attempt to compile while subsuming loads into machine instructions. // Attempt to compile while subsuming loads into machine instructions.
Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis); Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing);
// Check result and retry if appropriate. // Check result and retry if appropriate.
...@@ -142,6 +143,12 @@ void C2Compiler::compile_method(ciEnv* env, ...@@ -142,6 +143,12 @@ void C2Compiler::compile_method(ciEnv* env,
do_escape_analysis = false; do_escape_analysis = false;
continue; // retry continue; // retry
} }
if (C.has_boxed_value()) {
// Recompile without boxing elimination regardless failure reason.
assert(eliminate_boxing, "must make progress");
eliminate_boxing = false;
continue; // retry
}
// Pass any other failure reason up to the ciEnv. // Pass any other failure reason up to the ciEnv.
// Note that serious, irreversible failures are already logged // Note that serious, irreversible failures are already logged
// on the ciEnv via env->record_method_not_compilable(). // on the ciEnv via env->record_method_not_compilable().
......
...@@ -134,7 +134,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) { ...@@ -134,7 +134,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
} }
CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci()); CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
_call_node = call; // Save the call node in case we need it later _call_node = call; // Save the call node in case we need it later
if (!is_static) { if (!is_static) {
// Make an explicit receiver null_check as part of this call. // Make an explicit receiver null_check as part of this call.
...@@ -304,29 +304,34 @@ class LateInlineCallGenerator : public DirectCallGenerator { ...@@ -304,29 +304,34 @@ class LateInlineCallGenerator : public DirectCallGenerator {
void LateInlineCallGenerator::do_late_inline() { void LateInlineCallGenerator::do_late_inline() {
// Can't inline it // Can't inline it
if (call_node() == NULL || call_node()->outcnt() == 0 || CallStaticJavaNode* call = call_node();
call_node()->in(0) == NULL || call_node()->in(0)->is_top()) { if (call == NULL || call->outcnt() == 0 ||
call->in(0) == NULL || call->in(0)->is_top()) {
return; return;
} }
const TypeTuple *r = call_node()->tf()->domain(); const TypeTuple *r = call->tf()->domain();
for (int i1 = 0; i1 < method()->arg_size(); i1++) { for (int i1 = 0; i1 < method()->arg_size(); i1++) {
if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return; return;
} }
} }
if (call_node()->in(TypeFunc::Memory)->is_top()) { if (call->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return; return;
} }
CallStaticJavaNode* call = call_node(); Compile* C = Compile::current();
// Remove inlined methods from Compiler's lists.
if (call->is_macro()) {
C->remove_macro_node(call);
}
// Make a clone of the JVMState that appropriate to use for driving a parse // Make a clone of the JVMState that appropriate to use for driving a parse
Compile* C = Compile::current(); JVMState* old_jvms = call->jvms();
JVMState* jvms = call->jvms()->clone_shallow(C); JVMState* jvms = old_jvms->clone_shallow(C);
uint size = call->req(); uint size = call->req();
SafePointNode* map = new (C) SafePointNode(size, jvms); SafePointNode* map = new (C) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) { for (uint i1 = 0; i1 < size; i1++) {
...@@ -340,16 +345,23 @@ void LateInlineCallGenerator::do_late_inline() { ...@@ -340,16 +345,23 @@ void LateInlineCallGenerator::do_late_inline() {
map->set_req(TypeFunc::Memory, mem); map->set_req(TypeFunc::Memory, mem);
} }
// Make enough space for the expression stack and transfer the incoming arguments uint nargs = method()->arg_size();
int nargs = method()->arg_size(); // blow away old call arguments
Node* top = C->top();
for (uint i1 = 0; i1 < nargs; i1++) {
map->set_req(TypeFunc::Parms + i1, top);
}
jvms->set_map(map); jvms->set_map(map);
// Make enough space in the expression stack to transfer
// the incoming arguments and return value.
map->ensure_stack(jvms, jvms->method()->max_stack()); map->ensure_stack(jvms, jvms->method()->max_stack());
if (nargs > 0) { for (uint i1 = 0; i1 < nargs; i1++) {
for (int i1 = 0; i1 < nargs; i1++) { map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
}
} }
// This check is done here because for_method_handle_inline() method
// needs jvms for inlined state.
if (!do_late_inline_check(jvms)) { if (!do_late_inline_check(jvms)) {
map->disconnect_inputs(NULL, C); map->disconnect_inputs(NULL, C);
return; return;
...@@ -480,6 +492,26 @@ CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGener ...@@ -480,6 +492,26 @@ CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGener
return new LateInlineStringCallGenerator(method, inline_cg); return new LateInlineStringCallGenerator(method, inline_cg);
} }
//-----------------------LateInlineBoxingCallGenerator-------------------------
// Call generator for boxing methods whose inlining is deferred: generate()
// first emits a plain direct call, and registers this generator with the
// compilation so the inline can be performed later (presumably once it is
// known whether the boxed value can be eliminated -- see
// Compile::add_boxing_late_inline; exact timing is decided by Compile).
class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
public:
// Takes the target boxing method and the inline generator to be used if
// the late inline is actually performed.
LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
LateInlineCallGenerator(method, inline_cg) {}
virtual JVMState* generate(JVMState* jvms) {
Compile *C = Compile::current();
// Suppress printing of this call site for now; it will be reported
// when the late-inline decision is made.
C->print_inlining_skip(this);
// Queue this generator on the compilation's boxing late-inline list
// before emitting the call, so it is found during late inlining.
C->add_boxing_late_inline(this);
// Emit an ordinary direct call for the time being.
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
return new_jvms;
}
};
// Factory: wrap 'inline_cg' in a LateInlineBoxingCallGenerator so that
// inlining of the boxing method 'method' is deferred to the late-inline
// phase of the compilation.
CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
return new LateInlineBoxingCallGenerator(method, inline_cg);
}
//---------------------------WarmCallGenerator-------------------------------- //---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions. // Internal class which handles initial deferral of inlining decisions.
......
...@@ -125,6 +125,7 @@ class CallGenerator : public ResourceObj { ...@@ -125,6 +125,7 @@ class CallGenerator : public ResourceObj {
static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg); static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const); static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg); static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
// How to make a call but defer the decision whether to inline or not. // How to make a call but defer the decision whether to inline or not.
static CallGenerator* for_warm_call(WarmCallInfo* ci, static CallGenerator* for_warm_call(WarmCallInfo* ci,
......
...@@ -523,7 +523,9 @@ void JVMState::dump_spec(outputStream *st) const { ...@@ -523,7 +523,9 @@ void JVMState::dump_spec(outputStream *st) const {
void JVMState::dump_on(outputStream* st) const { void JVMState::dump_on(outputStream* st) const {
if (_map && !((uintptr_t)_map & 1)) { bool print_map = _map && !((uintptr_t)_map & 1) &&
((caller() == NULL) || (caller()->map() != _map));
if (print_map) {
if (_map->len() > _map->req()) { // _map->has_exceptions() if (_map->len() > _map->req()) { // _map->has_exceptions()
Node* ex = _map->in(_map->req()); // _map->next_exception() Node* ex = _map->in(_map->req()); // _map->next_exception()
// skip the first one; it's already being printed // skip the first one; it's already being printed
...@@ -532,7 +534,10 @@ void JVMState::dump_on(outputStream* st) const { ...@@ -532,7 +534,10 @@ void JVMState::dump_on(outputStream* st) const {
ex->dump(1); ex->dump(1);
} }
} }
_map->dump(2); _map->dump(Verbose ? 2 : 1);
}
if (caller() != NULL) {
caller()->dump_on(st);
} }
st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
...@@ -546,9 +551,6 @@ void JVMState::dump_on(outputStream* st) const { ...@@ -546,9 +551,6 @@ void JVMState::dump_on(outputStream* st) const {
_method->print_codes_on(bci(), bci()+1, st); _method->print_codes_on(bci(), bci()+1, st);
} }
} }
if (caller() != NULL) {
caller()->dump_on(st);
}
} }
// Extra way to dump a jvms from the debugger, // Extra way to dump a jvms from the debugger,
...@@ -584,6 +586,15 @@ JVMState* JVMState::clone_deep(Compile* C) const { ...@@ -584,6 +586,15 @@ JVMState* JVMState::clone_deep(Compile* C) const {
return n; return n;
} }
/**
 * Reset the map for this JVMState and all caller states so the entire
 * inlined-state chain refers to the given map.
 */
void JVMState::set_map_deep(SafePointNode* map) {
  // Walk all the way to the root of the caller chain.  The previous loop
  // condition (p->_caller != NULL) stopped one state short: the outermost
  // JVMState kept its stale map, and a depth-1 state was never updated at
  // all -- contradicting the documented "all callers" contract.
  for (JVMState* p = this; p != NULL; p = p->_caller) {
    p->set_map(map);
  }
}
//============================================================================= //=============================================================================
uint CallNode::cmp( const Node &n ) const uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
...@@ -663,17 +674,49 @@ uint CallNode::match_edge(uint idx) const { ...@@ -663,17 +674,49 @@ uint CallNode::match_edge(uint idx) const {
// Determine whether the call could modify the field of the specified // Determine whether the call could modify the field of the specified
// instance at the specified offset. // instance at the specified offset.
// //
bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); assert((t_oop != NULL), "sanity");
if (t_oop->is_known_instance()) {
// If not an OopPtr or not an instance type, assume the worst. // The instance_id is set only for scalar-replaceable allocations which
// Note: currently this method is called only for instance types. // are not passed as arguments according to Escape Analysis.
if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { return false;
return true;
} }
// The instance_id is set only for scalar-replaceable allocations which if (t_oop->is_ptr_to_boxed_value()) {
// are not passed as arguments according to Escape Analysis. ciKlass* boxing_klass = t_oop->klass();
return false; if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
// Skip unrelated boxing methods.
Node* proj = proj_out(TypeFunc::Parms);
if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
return false;
}
}
if (is_CallJava() && as_CallJava()->method() != NULL) {
ciMethod* meth = as_CallJava()->method();
if (meth->is_accessor()) {
return false;
}
// May modify (by reflection) if an boxing object is passed
// as argument or returned.
if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) {
Node* proj = proj_out(TypeFunc::Parms);
const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
(inst_t->klass() == boxing_klass))) {
return true;
}
}
const TypeTuple* d = tf()->domain();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
(inst_t->klass() == boxing_klass))) {
return true;
}
}
return false;
}
}
return true;
} }
// Does this call have a direct reference to n other than debug information? // Does this call have a direct reference to n other than debug information?
...@@ -1020,6 +1063,7 @@ void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { ...@@ -1020,6 +1063,7 @@ void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
int scloff = jvms->scloff(); int scloff = jvms->scloff();
int endoff = jvms->endoff(); int endoff = jvms->endoff();
assert(endoff == (int)req(), "no other states or debug info after me"); assert(endoff == (int)req(), "no other states or debug info after me");
assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
Node* top = Compile::current()->top(); Node* top = Compile::current()->top();
for (uint i = 0; i < grow_by; i++) { for (uint i = 0; i < grow_by; i++) {
ins_req(monoff, top); ins_req(monoff, top);
...@@ -1035,6 +1079,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) { ...@@ -1035,6 +1079,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
const int MonitorEdges = 2; const int MonitorEdges = 2;
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
assert(req() == jvms()->endoff(), "correct sizing"); assert(req() == jvms()->endoff(), "correct sizing");
assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
int nextmon = jvms()->scloff(); int nextmon = jvms()->scloff();
if (GenerateSynchronizationCode) { if (GenerateSynchronizationCode) {
add_req(lock->box_node()); add_req(lock->box_node());
...@@ -1050,6 +1095,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) { ...@@ -1050,6 +1095,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
void SafePointNode::pop_monitor() { void SafePointNode::pop_monitor() {
// Delete last monitor from debug info // Delete last monitor from debug info
assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
debug_only(int num_before_pop = jvms()->nof_monitors()); debug_only(int num_before_pop = jvms()->nof_monitors());
const int MonitorEdges = (1<<JVMState::logMonitorEdges); const int MonitorEdges = (1<<JVMState::logMonitorEdges);
int scloff = jvms()->scloff(); int scloff = jvms()->scloff();
...@@ -1154,6 +1200,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, ...@@ -1154,6 +1200,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
init_class_id(Class_Allocate); init_class_id(Class_Allocate);
init_flags(Flag_is_macro); init_flags(Flag_is_macro);
_is_scalar_replaceable = false; _is_scalar_replaceable = false;
_is_non_escaping = false;
Node *topnode = C->top(); Node *topnode = C->top();
init_req( TypeFunc::Control , ctrl ); init_req( TypeFunc::Control , ctrl );
...@@ -1169,8 +1216,6 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, ...@@ -1169,8 +1216,6 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
} }
//============================================================================= //=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this; if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node // Don't bother trying to transform a dead node
...@@ -1235,6 +1280,8 @@ Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTran ...@@ -1235,6 +1280,8 @@ Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTran
// - the narrow_length is 0 // - the narrow_length is 0
// - the narrow_length is not wider than length // - the narrow_length is not wider than length
assert(narrow_length_type == TypeInt::ZERO || assert(narrow_length_type == TypeInt::ZERO ||
length_type->is_con() && narrow_length_type->is_con() &&
(narrow_length_type->_hi <= length_type->_lo) ||
(narrow_length_type->_hi <= length_type->_hi && (narrow_length_type->_hi <= length_type->_hi &&
narrow_length_type->_lo >= length_type->_lo), narrow_length_type->_lo >= length_type->_lo),
"narrow type must be narrower than length type"); "narrow type must be narrower than length type");
......
...@@ -49,6 +49,7 @@ class CallLeafNode; ...@@ -49,6 +49,7 @@ class CallLeafNode;
class CallLeafNoFPNode; class CallLeafNoFPNode;
class AllocateNode; class AllocateNode;
class AllocateArrayNode; class AllocateArrayNode;
class BoxLockNode;
class LockNode; class LockNode;
class UnlockNode; class UnlockNode;
class JVMState; class JVMState;
...@@ -235,7 +236,6 @@ public: ...@@ -235,7 +236,6 @@ public:
int loc_size() const { return stkoff() - locoff(); } int loc_size() const { return stkoff() - locoff(); }
int stk_size() const { return monoff() - stkoff(); } int stk_size() const { return monoff() - stkoff(); }
int arg_size() const { return monoff() - argoff(); }
int mon_size() const { return scloff() - monoff(); } int mon_size() const { return scloff() - monoff(); }
int scl_size() const { return endoff() - scloff(); } int scl_size() const { return endoff() - scloff(); }
...@@ -298,6 +298,7 @@ public: ...@@ -298,6 +298,7 @@ public:
// Miscellaneous utility functions // Miscellaneous utility functions
JVMState* clone_deep(Compile* C) const; // recursively clones caller chain JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
JVMState* clone_shallow(Compile* C) const; // retains uncloned caller JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
void set_map_deep(SafePointNode *map);// reset map for all callers
#ifndef PRODUCT #ifndef PRODUCT
void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const; void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
...@@ -439,7 +440,7 @@ public: ...@@ -439,7 +440,7 @@ public:
static bool needs_polling_address_input(); static bool needs_polling_address_input();
#ifndef PRODUCT #ifndef PRODUCT
virtual void dump_spec(outputStream *st) const; virtual void dump_spec(outputStream *st) const;
#endif #endif
}; };
...@@ -554,10 +555,10 @@ public: ...@@ -554,10 +555,10 @@ public:
virtual bool guaranteed_safepoint() { return true; } virtual bool guaranteed_safepoint() { return true; }
// For macro nodes, the JVMState gets modified during expansion, so when cloning // For macro nodes, the JVMState gets modified during expansion, so when cloning
// the node the JVMState must be cloned. // the node the JVMState must be cloned.
virtual void clone_jvms() { } // default is not to clone virtual void clone_jvms(Compile* C) { } // default is not to clone
// Returns true if the call may modify n // Returns true if the call may modify n
virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase); virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
// Does this node have a use of n other than in debug information? // Does this node have a use of n other than in debug information?
bool has_non_debug_use(Node *n); bool has_non_debug_use(Node *n);
// Returns the unique CheckCastPP of a call // Returns the unique CheckCastPP of a call
...@@ -630,9 +631,15 @@ class CallStaticJavaNode : public CallJavaNode { ...@@ -630,9 +631,15 @@ class CallStaticJavaNode : public CallJavaNode {
virtual uint cmp( const Node &n ) const; virtual uint cmp( const Node &n ) const;
virtual uint size_of() const; // Size is bigger virtual uint size_of() const; // Size is bigger
public: public:
CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci) CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
: CallJavaNode(tf, addr, method, bci), _name(NULL) { : CallJavaNode(tf, addr, method, bci), _name(NULL) {
init_class_id(Class_CallStaticJava); init_class_id(Class_CallStaticJava);
if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
_is_scalar_replaceable = false;
_is_non_escaping = false;
} }
CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci, CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
const TypePtr* adr_type) const TypePtr* adr_type)
...@@ -640,13 +647,31 @@ public: ...@@ -640,13 +647,31 @@ public:
init_class_id(Class_CallStaticJava); init_class_id(Class_CallStaticJava);
// This node calls a runtime stub, which often has narrow memory effects. // This node calls a runtime stub, which often has narrow memory effects.
_adr_type = adr_type; _adr_type = adr_type;
_is_scalar_replaceable = false;
_is_non_escaping = false;
} }
const char *_name; // Runtime wrapper name const char *_name; // Runtime wrapper name
// Result of Escape Analysis
bool _is_scalar_replaceable;
bool _is_non_escaping;
// If this is an uncommon trap, return the request code, else zero. // If this is an uncommon trap, return the request code, else zero.
int uncommon_trap_request() const; int uncommon_trap_request() const;
static int extract_uncommon_trap_request(const Node* call); static int extract_uncommon_trap_request(const Node* call);
bool is_boxing_method() const {
return is_macro() && (method() != NULL) && method()->is_boxing_method();
}
// Later inlining modifies the JVMState, so we need to clone it
// when the call node is cloned (because it is macro node).
virtual void clone_jvms(Compile* C) {
if ((jvms() != NULL) && is_boxing_method()) {
set_jvms(jvms()->clone_deep(C));
jvms()->set_map_deep(this);
}
}
virtual int Opcode() const; virtual int Opcode() const;
#ifndef PRODUCT #ifndef PRODUCT
virtual void dump_spec(outputStream *st) const; virtual void dump_spec(outputStream *st) const;
...@@ -748,12 +773,12 @@ public: ...@@ -748,12 +773,12 @@ public:
ParmLimit ParmLimit
}; };
static const TypeFunc* alloc_type() { static const TypeFunc* alloc_type(const Type* t) {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms); const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[AllocSize] = TypeInt::POS; fields[AllocSize] = TypeInt::POS;
fields[KlassNode] = TypeInstPtr::NOTNULL; fields[KlassNode] = TypeInstPtr::NOTNULL;
fields[InitialTest] = TypeInt::BOOL; fields[InitialTest] = TypeInt::BOOL;
fields[ALength] = TypeInt::INT; // length (can be a bad length) fields[ALength] = t; // length (can be a bad length)
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
...@@ -766,21 +791,26 @@ public: ...@@ -766,21 +791,26 @@ public:
return TypeFunc::make(domain, range); return TypeFunc::make(domain, range);
} }
bool _is_scalar_replaceable; // Result of Escape Analysis // Result of Escape Analysis
bool _is_scalar_replaceable;
bool _is_non_escaping;
virtual uint size_of() const; // Size is bigger virtual uint size_of() const; // Size is bigger
AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
Node *size, Node *klass_node, Node *initial_test); Node *size, Node *klass_node, Node *initial_test);
// Expansion modifies the JVMState, so we need to clone it // Expansion modifies the JVMState, so we need to clone it
virtual void clone_jvms() { virtual void clone_jvms(Compile* C) {
set_jvms(jvms()->clone_deep(Compile::current())); if (jvms() != NULL) {
set_jvms(jvms()->clone_deep(C));
jvms()->set_map_deep(this);
}
} }
virtual int Opcode() const; virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; } virtual uint ideal_reg() const { return Op_RegP; }
virtual bool guaranteed_safepoint() { return false; } virtual bool guaranteed_safepoint() { return false; }
// allocations do not modify their arguments // allocations do not modify their arguments
virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;} virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
// Pattern-match a possible usage of AllocateNode. // Pattern-match a possible usage of AllocateNode.
// Return null if no allocation is recognized. // Return null if no allocation is recognized.
...@@ -815,10 +845,6 @@ public: ...@@ -815,10 +845,6 @@ public:
// are defined in graphKit.cpp, which sets up the bidirectional relation.) // are defined in graphKit.cpp, which sets up the bidirectional relation.)
InitializeNode* initialization(); InitializeNode* initialization();
// Return the corresponding storestore barrier (or null if none).
// Walks out edges to find it...
MemBarStoreStoreNode* storestore();
// Convenience for initialization->maybe_set_complete(phase) // Convenience for initialization->maybe_set_complete(phase)
bool maybe_set_complete(PhaseGVN* phase); bool maybe_set_complete(PhaseGVN* phase);
}; };
...@@ -840,7 +866,6 @@ public: ...@@ -840,7 +866,6 @@ public:
set_req(AllocateNode::ALength, count_val); set_req(AllocateNode::ALength, count_val);
} }
virtual int Opcode() const; virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Dig the length operand out of a array allocation site. // Dig the length operand out of a array allocation site.
...@@ -918,7 +943,7 @@ public: ...@@ -918,7 +943,7 @@ public:
void set_nested() { _kind = Nested; set_eliminated_lock_counter(); } void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }
// locking does not modify its arguments // locking does not modify its arguments
virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;} virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}
#ifndef PRODUCT #ifndef PRODUCT
void create_lock_counter(JVMState* s); void create_lock_counter(JVMState* s);
...@@ -965,8 +990,11 @@ public: ...@@ -965,8 +990,11 @@ public:
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Expansion modifies the JVMState, so we need to clone it // Expansion modifies the JVMState, so we need to clone it
virtual void clone_jvms() { virtual void clone_jvms(Compile* C) {
set_jvms(jvms()->clone_deep(Compile::current())); if (jvms() != NULL) {
set_jvms(jvms()->clone_deep(C));
jvms()->set_map_deep(this);
}
} }
bool is_nested_lock_region(); // Is this Lock nested? bool is_nested_lock_region(); // Is this Lock nested?
......
...@@ -806,7 +806,7 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons ...@@ -806,7 +806,7 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons
Node *in = ophi->in(i); Node *in = ophi->in(i);
if (in == NULL || igvn->type(in) == Type::TOP) if (in == NULL || igvn->type(in) == Type::TOP)
continue; continue;
Node *opt = MemNode::optimize_simple_memory_chain(in, at, igvn); Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn);
PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL; PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) { if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
opt = node_map[optphi->_idx]; opt = node_map[optphi->_idx];
...@@ -1921,7 +1921,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { ...@@ -1921,7 +1921,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const TypePtr* at = adr_type(); const TypePtr* at = adr_type();
for( uint i=1; i<req(); ++i ) {// For all paths in for( uint i=1; i<req(); ++i ) {// For all paths in
Node *ii = in(i); Node *ii = in(i);
Node *new_in = MemNode::optimize_memory_chain(ii, at, phase); Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase);
if (ii != new_in ) { if (ii != new_in ) {
set_req(i, new_in); set_req(i, new_in);
progress = this; progress = this;
......
...@@ -418,6 +418,7 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) { ...@@ -418,6 +418,7 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
} }
// clean up the late inline lists // clean up the late inline lists
remove_useless_late_inlines(&_string_late_inlines, useful); remove_useless_late_inlines(&_string_late_inlines, useful);
remove_useless_late_inlines(&_boxing_late_inlines, useful);
remove_useless_late_inlines(&_late_inlines, useful); remove_useless_late_inlines(&_late_inlines, useful);
debug_only(verify_graph_edges(true/*check for no_dead_code*/);) debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
} }
...@@ -485,6 +486,12 @@ void Compile::print_compile_messages() { ...@@ -485,6 +486,12 @@ void Compile::print_compile_messages() {
tty->print_cr("** Bailout: Recompile without escape analysis **"); tty->print_cr("** Bailout: Recompile without escape analysis **");
tty->print_cr("*********************************************************"); tty->print_cr("*********************************************************");
} }
if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
// Recompiling without boxing elimination
tty->print_cr("*********************************************************");
tty->print_cr("** Bailout: Recompile without boxing elimination **");
tty->print_cr("*********************************************************");
}
if (env()->break_at_compile()) { if (env()->break_at_compile()) {
// Open the debugger when compiling this method. // Open the debugger when compiling this method.
tty->print("### Breaking when compiling: "); tty->print("### Breaking when compiling: ");
...@@ -601,7 +608,8 @@ debug_only( int Compile::_debug_idx = 100000; ) ...@@ -601,7 +608,8 @@ debug_only( int Compile::_debug_idx = 100000; )
// the continuation bci for on stack replacement. // the continuation bci for on stack replacement.
Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis ) Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
: Phase(Compiler), : Phase(Compiler),
_env(ci_env), _env(ci_env),
_log(ci_env->log()), _log(ci_env->log()),
...@@ -617,6 +625,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr ...@@ -617,6 +625,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_warm_calls(NULL), _warm_calls(NULL),
_subsume_loads(subsume_loads), _subsume_loads(subsume_loads),
_do_escape_analysis(do_escape_analysis), _do_escape_analysis(do_escape_analysis),
_eliminate_boxing(eliminate_boxing),
_failure_reason(NULL), _failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"), _code_buffer("Compile::Fill_buffer"),
_orig_pc_slot(0), _orig_pc_slot(0),
...@@ -638,6 +647,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr ...@@ -638,6 +647,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_congraph(NULL), _congraph(NULL),
_late_inlines(comp_arena(), 2, 0, NULL), _late_inlines(comp_arena(), 2, 0, NULL),
_string_late_inlines(comp_arena(), 2, 0, NULL), _string_late_inlines(comp_arena(), 2, 0, NULL),
_boxing_late_inlines(comp_arena(), 2, 0, NULL),
_late_inlines_pos(0), _late_inlines_pos(0),
_number_of_mh_late_inlines(0), _number_of_mh_late_inlines(0),
_inlining_progress(false), _inlining_progress(false),
...@@ -906,6 +916,7 @@ Compile::Compile( ciEnv* ci_env, ...@@ -906,6 +916,7 @@ Compile::Compile( ciEnv* ci_env,
_orig_pc_slot_offset_in_bytes(0), _orig_pc_slot_offset_in_bytes(0),
_subsume_loads(true), _subsume_loads(true),
_do_escape_analysis(false), _do_escape_analysis(false),
_eliminate_boxing(false),
_failure_reason(NULL), _failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"), _code_buffer("Compile::Fill_buffer"),
_has_method_handle_invokes(false), _has_method_handle_invokes(false),
...@@ -1016,6 +1027,7 @@ void Compile::Init(int aliaslevel) { ...@@ -1016,6 +1027,7 @@ void Compile::Init(int aliaslevel) {
set_has_split_ifs(false); set_has_split_ifs(false);
set_has_loops(has_method() && method()->has_loops()); // first approximation set_has_loops(has_method() && method()->has_loops()); // first approximation
set_has_stringbuilder(false); set_has_stringbuilder(false);
set_has_boxed_value(false);
_trap_can_recompile = false; // no traps emitted yet _trap_can_recompile = false; // no traps emitted yet
_major_progress = true; // start out assuming good things will happen _major_progress = true; // start out assuming good things will happen
set_has_unsafe_access(false); set_has_unsafe_access(false);
...@@ -1807,6 +1819,38 @@ void Compile::inline_string_calls(bool parse_time) { ...@@ -1807,6 +1819,38 @@ void Compile::inline_string_calls(bool parse_time) {
_string_late_inlines.trunc_to(0); _string_late_inlines.trunc_to(0);
} }
// Late inlining of boxing methods
void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
if (_boxing_late_inlines.length() > 0) {
assert(has_boxed_value(), "inconsistent");
PhaseGVN* gvn = initial_gvn();
set_inlining_incrementally(true);
assert( igvn._worklist.size() == 0, "should be done with igvn" );
for_igvn()->clear();
gvn->replace_with(&igvn);
while (_boxing_late_inlines.length() > 0) {
CallGenerator* cg = _boxing_late_inlines.pop();
cg->do_late_inline();
if (failing()) return;
}
_boxing_late_inlines.trunc_to(0);
{
ResourceMark rm;
PhaseRemoveUseless pru(gvn, for_igvn());
}
igvn = PhaseIterGVN(gvn);
igvn.optimize();
set_inlining_progress(false);
set_inlining_incrementally(false);
}
}
void Compile::inline_incrementally_one(PhaseIterGVN& igvn) { void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
assert(IncrementalInline, "incremental inlining should be on"); assert(IncrementalInline, "incremental inlining should be on");
PhaseGVN* gvn = initial_gvn(); PhaseGVN* gvn = initial_gvn();
...@@ -1831,7 +1875,7 @@ void Compile::inline_incrementally_one(PhaseIterGVN& igvn) { ...@@ -1831,7 +1875,7 @@ void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
{ {
ResourceMark rm; ResourceMark rm;
PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn()); PhaseRemoveUseless pru(gvn, for_igvn());
} }
igvn = PhaseIterGVN(gvn); igvn = PhaseIterGVN(gvn);
...@@ -1929,12 +1973,25 @@ void Compile::Optimize() { ...@@ -1929,12 +1973,25 @@ void Compile::Optimize() {
if (failing()) return; if (failing()) return;
inline_incrementally(igvn); {
NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
inline_incrementally(igvn);
}
print_method("Incremental Inline", 2); print_method("Incremental Inline", 2);
if (failing()) return; if (failing()) return;
if (eliminate_boxing()) {
NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
// Inline valueOf() methods now.
inline_boxing_calls(igvn);
print_method("Incremental Boxing Inline", 2);
if (failing()) return;
}
// No more new expensive nodes will be added to the list from here // No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations. // so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn); cleanup_expensive_nodes(igvn);
...@@ -2896,6 +2953,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) { ...@@ -2896,6 +2953,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
} }
break; break;
case Op_MemBarStoreStore: case Op_MemBarStoreStore:
case Op_MemBarRelease:
// Break the link with AllocateNode: it is no longer useful and // Break the link with AllocateNode: it is no longer useful and
// confuses register allocation. // confuses register allocation.
if (n->req() > MemBarNode::Precedent) { if (n->req() > MemBarNode::Precedent) {
......
...@@ -262,6 +262,7 @@ class Compile : public Phase { ...@@ -262,6 +262,7 @@ class Compile : public Phase {
const bool _save_argument_registers; // save/restore arg regs for trampolines const bool _save_argument_registers; // save/restore arg regs for trampolines
const bool _subsume_loads; // Load can be matched as part of a larger op. const bool _subsume_loads; // Load can be matched as part of a larger op.
const bool _do_escape_analysis; // Do escape analysis. const bool _do_escape_analysis; // Do escape analysis.
const bool _eliminate_boxing; // Do boxing elimination.
ciMethod* _method; // The method being compiled. ciMethod* _method; // The method being compiled.
int _entry_bci; // entry bci for osr methods. int _entry_bci; // entry bci for osr methods.
const TypeFunc* _tf; // My kind of signature const TypeFunc* _tf; // My kind of signature
...@@ -287,6 +288,7 @@ class Compile : public Phase { ...@@ -287,6 +288,7 @@ class Compile : public Phase {
bool _has_split_ifs; // True if the method _may_ have some split-if bool _has_split_ifs; // True if the method _may_ have some split-if
bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores. bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
bool _has_stringbuilder; // True StringBuffers or StringBuilders are allocated bool _has_stringbuilder; // True StringBuffers or StringBuilders are allocated
bool _has_boxed_value; // True if a boxed object is allocated
int _max_vector_size; // Maximum size of generated vectors int _max_vector_size; // Maximum size of generated vectors
uint _trap_hist[trapHistLength]; // Cumulative traps uint _trap_hist[trapHistLength]; // Cumulative traps
bool _trap_can_recompile; // Have we emitted a recompiling trap? bool _trap_can_recompile; // Have we emitted a recompiling trap?
...@@ -375,6 +377,8 @@ class Compile : public Phase { ...@@ -375,6 +377,8 @@ class Compile : public Phase {
// main parsing has finished. // main parsing has finished.
GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining) int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
uint _number_of_mh_late_inlines; // number of method handle late inlining still pending uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
...@@ -486,8 +490,12 @@ class Compile : public Phase { ...@@ -486,8 +490,12 @@ class Compile : public Phase {
// instructions that subsume a load may result in an unschedulable // instructions that subsume a load may result in an unschedulable
// instruction sequence. // instruction sequence.
bool subsume_loads() const { return _subsume_loads; } bool subsume_loads() const { return _subsume_loads; }
// Do escape analysis. /** Do escape analysis. */
bool do_escape_analysis() const { return _do_escape_analysis; } bool do_escape_analysis() const { return _do_escape_analysis; }
/** Do boxing elimination. */
bool eliminate_boxing() const { return _eliminate_boxing; }
/** Do aggressive boxing elimination. */
bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
bool save_argument_registers() const { return _save_argument_registers; } bool save_argument_registers() const { return _save_argument_registers; }
...@@ -527,6 +535,8 @@ class Compile : public Phase { ...@@ -527,6 +535,8 @@ class Compile : public Phase {
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_stringbuilder() const { return _has_stringbuilder; } bool has_stringbuilder() const { return _has_stringbuilder; }
void set_has_stringbuilder(bool z) { _has_stringbuilder = z; } void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
bool has_boxed_value() const { return _has_boxed_value; }
void set_has_boxed_value(bool z) { _has_boxed_value = z; }
int max_vector_size() const { return _max_vector_size; } int max_vector_size() const { return _max_vector_size; }
void set_max_vector_size(int s) { _max_vector_size = s; } void set_max_vector_size(int s) { _max_vector_size = s; }
void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; } void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
...@@ -579,12 +589,12 @@ class Compile : public Phase { ...@@ -579,12 +589,12 @@ class Compile : public Phase {
#endif #endif
} }
int macro_count() { return _macro_nodes->length(); } int macro_count() const { return _macro_nodes->length(); }
int predicate_count() { return _predicate_opaqs->length();} int predicate_count() const { return _predicate_opaqs->length();}
int expensive_count() { return _expensive_nodes->length(); } int expensive_count() const { return _expensive_nodes->length(); }
Node* macro_node(int idx) { return _macro_nodes->at(idx); } Node* macro_node(int idx) const { return _macro_nodes->at(idx); }
Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);} Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
Node* expensive_node(int idx) { return _expensive_nodes->at(idx); } Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); }
ConnectionGraph* congraph() { return _congraph;} ConnectionGraph* congraph() { return _congraph;}
void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;} void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
void add_macro_node(Node * n) { void add_macro_node(Node * n) {
...@@ -766,7 +776,12 @@ class Compile : public Phase { ...@@ -766,7 +776,12 @@ class Compile : public Phase {
// Decide how to build a call. // Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile. // The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false); CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms); bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return should_delay_string_inlining(call_method, jvms) ||
should_delay_boxing_inlining(call_method, jvms);
}
bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
// Helper functions to identify inlining potential at call-site // Helper functions to identify inlining potential at call-site
ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass, ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
...@@ -822,6 +837,10 @@ class Compile : public Phase { ...@@ -822,6 +837,10 @@ class Compile : public Phase {
_string_late_inlines.push(cg); _string_late_inlines.push(cg);
} }
void add_boxing_late_inline(CallGenerator* cg) {
_boxing_late_inlines.push(cg);
}
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful); void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
void dump_inlining(); void dump_inlining();
...@@ -841,6 +860,7 @@ class Compile : public Phase { ...@@ -841,6 +860,7 @@ class Compile : public Phase {
void inline_incrementally_one(PhaseIterGVN& igvn); void inline_incrementally_one(PhaseIterGVN& igvn);
void inline_incrementally(PhaseIterGVN& igvn); void inline_incrementally(PhaseIterGVN& igvn);
void inline_string_calls(bool parse_time); void inline_string_calls(bool parse_time);
void inline_boxing_calls(PhaseIterGVN& igvn);
// Matching, CFG layout, allocation, code generation // Matching, CFG layout, allocation, code generation
PhaseCFG* cfg() { return _cfg; } PhaseCFG* cfg() { return _cfg; }
...@@ -913,7 +933,8 @@ class Compile : public Phase { ...@@ -913,7 +933,8 @@ class Compile : public Phase {
// replacement, entry_bci indicates the bytecode for which to compile a // replacement, entry_bci indicates the bytecode for which to compile a
// continuation. // continuation.
Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
int entry_bci, bool subsume_loads, bool do_escape_analysis); int entry_bci, bool subsume_loads, bool do_escape_analysis,
bool eliminate_boxing);
// Second major entry point. From the TypeFunc signature, generate code // Second major entry point. From the TypeFunc signature, generate code
// to pass arguments from the Java calling convention to the C calling // to pass arguments from the Java calling convention to the C calling
......
...@@ -176,9 +176,12 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool ...@@ -176,9 +176,12 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Delay the inlining of this method to give us the // Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations // opportunity to perform some high level optimizations
// first. // first.
if (should_delay_inlining(callee, jvms)) { if (should_delay_string_inlining(callee, jvms)) {
assert(!delayed_forbidden, "strange"); assert(!delayed_forbidden, "strange");
return CallGenerator::for_string_late_inline(callee, cg); return CallGenerator::for_string_late_inline(callee, cg);
} else if (should_delay_boxing_inlining(callee, jvms)) {
assert(!delayed_forbidden, "strange");
return CallGenerator::for_boxing_late_inline(callee, cg);
} else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) { } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
return CallGenerator::for_late_inline(callee, cg); return CallGenerator::for_late_inline(callee, cg);
} }
...@@ -276,7 +279,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool ...@@ -276,7 +279,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Return true for methods that shouldn't be inlined early so that // Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics. // they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) { bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
if (has_stringbuilder()) { if (has_stringbuilder()) {
if ((call_method->holder() == C->env()->StringBuilder_klass() || if ((call_method->holder() == C->env()->StringBuilder_klass() ||
...@@ -327,6 +330,13 @@ bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) { ...@@ -327,6 +330,13 @@ bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return false; return false;
} }
bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
if (eliminate_boxing() && call_method->is_boxing_method()) {
set_has_boxed_value(true);
return true;
}
return false;
}
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link // uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) { bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
......
...@@ -63,15 +63,19 @@ bool ConnectionGraph::has_candidates(Compile *C) { ...@@ -63,15 +63,19 @@ bool ConnectionGraph::has_candidates(Compile *C) {
// EA brings benefits only when the code has allocations and/or locks which // EA brings benefits only when the code has allocations and/or locks which
// are represented by ideal Macro nodes. // are represented by ideal Macro nodes.
int cnt = C->macro_count(); int cnt = C->macro_count();
for( int i=0; i < cnt; i++ ) { for (int i = 0; i < cnt; i++) {
Node *n = C->macro_node(i); Node *n = C->macro_node(i);
if ( n->is_Allocate() ) if (n->is_Allocate())
return true; return true;
if( n->is_Lock() ) { if (n->is_Lock()) {
Node* obj = n->as_Lock()->obj_node()->uncast(); Node* obj = n->as_Lock()->obj_node()->uncast();
if( !(obj->is_Parm() || obj->is_Con()) ) if (!(obj->is_Parm() || obj->is_Con()))
return true; return true;
} }
if (n->is_CallStaticJava() &&
n->as_CallStaticJava()->is_boxing_method()) {
return true;
}
} }
return false; return false;
} }
...@@ -115,7 +119,7 @@ bool ConnectionGraph::compute_escape() { ...@@ -115,7 +119,7 @@ bool ConnectionGraph::compute_escape() {
{ Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true); { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
// 1. Populate Connection Graph (CG) with PointsTo nodes. // 1. Populate Connection Graph (CG) with PointsTo nodes.
ideal_nodes.map(C->unique(), NULL); // preallocate space ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
// Initialize worklist // Initialize worklist
if (C->root() != NULL) { if (C->root() != NULL) {
ideal_nodes.push(C->root()); ideal_nodes.push(C->root());
...@@ -152,8 +156,11 @@ bool ConnectionGraph::compute_escape() { ...@@ -152,8 +156,11 @@ bool ConnectionGraph::compute_escape() {
// escape status of the associated Allocate node some of them // escape status of the associated Allocate node some of them
// may be eliminated. // may be eliminated.
storestore_worklist.append(n); storestore_worklist.append(n);
} else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
(n->req() > MemBarNode::Precedent)) {
record_for_optimizer(n);
#ifdef ASSERT #ifdef ASSERT
} else if(n->is_AddP()) { } else if (n->is_AddP()) {
// Collect address nodes for graph verification. // Collect address nodes for graph verification.
addp_worklist.append(n); addp_worklist.append(n);
#endif #endif
...@@ -206,8 +213,15 @@ bool ConnectionGraph::compute_escape() { ...@@ -206,8 +213,15 @@ bool ConnectionGraph::compute_escape() {
int non_escaped_length = non_escaped_worklist.length(); int non_escaped_length = non_escaped_worklist.length();
for (int next = 0; next < non_escaped_length; next++) { for (int next = 0; next < non_escaped_length; next++) {
JavaObjectNode* ptn = non_escaped_worklist.at(next); JavaObjectNode* ptn = non_escaped_worklist.at(next);
if (ptn->escape_state() == PointsToNode::NoEscape && bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
ptn->scalar_replaceable()) { Node* n = ptn->ideal_node();
if (n->is_Allocate()) {
n->as_Allocate()->_is_non_escaping = noescape;
}
if (n->is_CallStaticJava()) {
n->as_CallStaticJava()->_is_non_escaping = noescape;
}
if (noescape && ptn->scalar_replaceable()) {
adjust_scalar_replaceable_state(ptn); adjust_scalar_replaceable_state(ptn);
if (ptn->scalar_replaceable()) { if (ptn->scalar_replaceable()) {
alloc_worklist.append(ptn->ideal_node()); alloc_worklist.append(ptn->ideal_node());
...@@ -330,8 +344,10 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de ...@@ -330,8 +344,10 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
// Don't mark as processed since call's arguments have to be processed. // Don't mark as processed since call's arguments have to be processed.
delayed_worklist->push(n); delayed_worklist->push(n);
// Check if a call returns an object. // Check if a call returns an object.
if (n->as_Call()->returns_pointer() && if ((n->as_Call()->returns_pointer() &&
n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
(n->is_CallStaticJava() &&
n->as_CallStaticJava()->is_boxing_method())) {
add_call_node(n->as_Call()); add_call_node(n->as_Call());
} }
} }
...@@ -387,8 +403,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de ...@@ -387,8 +403,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
case Op_ConNKlass: { case Op_ConNKlass: {
// assume all oop constants globally escape except for null // assume all oop constants globally escape except for null
PointsToNode::EscapeState es; PointsToNode::EscapeState es;
if (igvn->type(n) == TypePtr::NULL_PTR || const Type* t = igvn->type(n);
igvn->type(n) == TypeNarrowOop::NULL_PTR) { if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
es = PointsToNode::NoEscape; es = PointsToNode::NoEscape;
} else { } else {
es = PointsToNode::GlobalEscape; es = PointsToNode::GlobalEscape;
...@@ -797,6 +813,9 @@ void ConnectionGraph::add_call_node(CallNode* call) { ...@@ -797,6 +813,9 @@ void ConnectionGraph::add_call_node(CallNode* call) {
// Returns a newly allocated unescaped object. // Returns a newly allocated unescaped object.
add_java_object(call, PointsToNode::NoEscape); add_java_object(call, PointsToNode::NoEscape);
ptnode_adr(call_idx)->set_scalar_replaceable(false); ptnode_adr(call_idx)->set_scalar_replaceable(false);
} else if (meth->is_boxing_method()) {
// Returns boxing object
add_java_object(call, PointsToNode::NoEscape);
} else { } else {
BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
call_analyzer->copy_dependencies(_compile->dependencies()); call_analyzer->copy_dependencies(_compile->dependencies());
...@@ -943,6 +962,9 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { ...@@ -943,6 +962,9 @@ void ConnectionGraph::process_call_arguments(CallNode *call) {
assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif #endif
ciMethod* meth = call->as_CallJava()->method(); ciMethod* meth = call->as_CallJava()->method();
if ((meth != NULL) && meth->is_boxing_method()) {
break; // Boxing methods do not modify any oops.
}
BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
// fall-through if not a Java method or no analyzer information // fall-through if not a Java method or no analyzer information
if (call_analyzer != NULL) { if (call_analyzer != NULL) {
...@@ -2744,6 +2766,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -2744,6 +2766,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// so it could be eliminated if it has no uses. // so it could be eliminated if it has no uses.
alloc->as_Allocate()->_is_scalar_replaceable = true; alloc->as_Allocate()->_is_scalar_replaceable = true;
} }
if (alloc->is_CallStaticJava()) {
// Set the scalar_replaceable flag for boxing method
// so it could be eliminated if it has no uses.
alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
}
continue; continue;
} }
if (!n->is_CheckCastPP()) { // not unique CheckCastPP. if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
...@@ -2782,6 +2809,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -2782,6 +2809,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// so it could be eliminated. // so it could be eliminated.
alloc->as_Allocate()->_is_scalar_replaceable = true; alloc->as_Allocate()->_is_scalar_replaceable = true;
} }
if (alloc->is_CallStaticJava()) {
// Set the scalar_replaceable flag for boxing method
// so it could be eliminated.
alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
}
set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
// in order for an object to be scalar-replaceable, it must be: // in order for an object to be scalar-replaceable, it must be:
// - a direct allocation (not a call returning an object) // - a direct allocation (not a call returning an object)
...@@ -2911,7 +2943,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -2911,7 +2943,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// Load/store to instance's field // Load/store to instance's field
memnode_worklist.append_if_missing(use); memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) { } else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use); if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
Node* addp2 = find_second_addp(use, n); Node* addp2 = find_second_addp(use, n);
if (addp2 != NULL) { if (addp2 != NULL) {
...@@ -3028,7 +3062,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -3028,7 +3062,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
continue; continue;
memnode_worklist.append_if_missing(use); memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) { } else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use); if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
#ifdef ASSERT #ifdef ASSERT
} else if(use->is_Mem()) { } else if(use->is_Mem()) {
assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
...@@ -3264,7 +3300,12 @@ void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { ...@@ -3264,7 +3300,12 @@ void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
if (ptn == NULL || !ptn->is_JavaObject()) if (ptn == NULL || !ptn->is_JavaObject())
continue; continue;
PointsToNode::EscapeState es = ptn->escape_state(); PointsToNode::EscapeState es = ptn->escape_state();
if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { if ((es != PointsToNode::NoEscape) && !Verbose) {
continue;
}
Node* n = ptn->ideal_node();
if (n->is_Allocate() || (n->is_CallStaticJava() &&
n->as_CallStaticJava()->is_boxing_method())) {
if (first) { if (first) {
tty->cr(); tty->cr();
tty->print("======== Connection graph for "); tty->print("======== Connection graph for ");
......
...@@ -333,6 +333,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph ...@@ -333,6 +333,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals"); assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes"); assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS"); assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
assert(ex_map->req() == phi_map->req(), "matching maps"); assert(ex_map->req() == phi_map->req(), "matching maps");
uint tos = ex_jvms->stkoff() + ex_jvms->sp(); uint tos = ex_jvms->stkoff() + ex_jvms->sp();
Node* hidden_merge_mark = root(); Node* hidden_merge_mark = root();
...@@ -409,7 +410,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph ...@@ -409,7 +410,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
while (dst->req() > orig_width) dst->del_req(dst->req()-1); while (dst->req() > orig_width) dst->del_req(dst->req()-1);
} else { } else {
assert(dst->is_Phi(), "nobody else uses a hidden region"); assert(dst->is_Phi(), "nobody else uses a hidden region");
phi = (PhiNode*)dst; phi = dst->as_Phi();
} }
if (add_multiple && src->in(0) == ex_control) { if (add_multiple && src->in(0) == ex_control) {
// Both are phis. // Both are phis.
...@@ -1438,7 +1439,12 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, ...@@ -1438,7 +1439,12 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
} else { } else {
ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
} }
return _gvn.transform(ld); ld = _gvn.transform(ld);
if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
record_for_igvn(ld);
}
return ld;
} }
Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
...@@ -3144,7 +3150,7 @@ Node* GraphKit::new_instance(Node* klass_node, ...@@ -3144,7 +3150,7 @@ Node* GraphKit::new_instance(Node* klass_node,
set_all_memory(mem); // Create new memory state set_all_memory(mem); // Create new memory state
AllocateNode* alloc AllocateNode* alloc
= new (C) AllocateNode(C, AllocateNode::alloc_type(), = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
control(), mem, i_o(), control(), mem, i_o(),
size, klass_node, size, klass_node,
initial_slow_test); initial_slow_test);
...@@ -3285,7 +3291,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) ...@@ -3285,7 +3291,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
// Create the AllocateArrayNode and its result projections // Create the AllocateArrayNode and its result projections
AllocateArrayNode* alloc AllocateArrayNode* alloc
= new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(), = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
control(), mem, i_o(), control(), mem, i_o(),
size, klass_node, size, klass_node,
initial_slow_test, initial_slow_test,
...@@ -3326,10 +3332,9 @@ AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) { ...@@ -3326,10 +3332,9 @@ AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
if (ptr == NULL) { // reduce dumb test in callers if (ptr == NULL) { // reduce dumb test in callers
return NULL; return NULL;
} }
if (ptr->is_CheckCastPP()) { // strip a raw-to-oop cast ptr = ptr->uncast(); // strip a raw-to-oop cast
ptr = ptr->in(1); if (ptr == NULL) return NULL;
if (ptr == NULL) return NULL;
}
if (ptr->is_Proj()) { if (ptr->is_Proj()) {
Node* allo = ptr->in(0); Node* allo = ptr->in(0);
if (allo != NULL && allo->is_Allocate()) { if (allo != NULL && allo->is_Allocate()) {
...@@ -3374,19 +3379,6 @@ InitializeNode* AllocateNode::initialization() { ...@@ -3374,19 +3379,6 @@ InitializeNode* AllocateNode::initialization() {
return NULL; return NULL;
} }
// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
MemBarStoreStoreNode* AllocateNode::storestore() {
ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
if (rawoop == NULL) return NULL;
for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
Node* storestore = rawoop->fast_out(i);
if (storestore->is_MemBarStoreStore()) {
return storestore->as_MemBarStoreStore();
}
}
return NULL;
}
//----------------------------- loop predicates --------------------------- //----------------------------- loop predicates ---------------------------
//------------------------------add_predicate_impl---------------------------- //------------------------------add_predicate_impl----------------------------
......
...@@ -673,7 +673,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj ...@@ -673,7 +673,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj
// / Region // / Region
// //
Node* IfNode::fold_compares(PhaseGVN* phase) { Node* IfNode::fold_compares(PhaseGVN* phase) {
if (!EliminateAutoBox || Opcode() != Op_If) return NULL; if (!phase->C->eliminate_boxing() || Opcode() != Op_If) return NULL;
Node* this_cmp = in(1)->in(1); Node* this_cmp = in(1)->in(1);
if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI && if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI &&
......
...@@ -3703,7 +3703,7 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual ...@@ -3703,7 +3703,7 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual
CallJavaNode* slow_call; CallJavaNode* slow_call;
if (is_static) { if (is_static) {
assert(!is_virtual, ""); assert(!is_virtual, "");
slow_call = new(C) CallStaticJavaNode(tf, slow_call = new(C) CallStaticJavaNode(C, tf,
SharedRuntime::get_resolve_static_call_stub(), SharedRuntime::get_resolve_static_call_stub(),
method, bci()); method, bci());
} else if (is_virtual) { } else if (is_virtual) {
...@@ -3722,7 +3722,7 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual ...@@ -3722,7 +3722,7 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual
method, vtable_index, bci()); method, vtable_index, bci());
} else { // neither virtual nor static: opt_virtual } else { // neither virtual nor static: opt_virtual
null_check_receiver(); null_check_receiver();
slow_call = new(C) CallStaticJavaNode(tf, slow_call = new(C) CallStaticJavaNode(C, tf,
SharedRuntime::get_resolve_opt_virtual_call_stub(), SharedRuntime::get_resolve_opt_virtual_call_stub(),
method, bci()); method, bci());
slow_call->set_optimized_virtual(true); slow_call->set_optimized_virtual(true);
......
...@@ -821,8 +821,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { ...@@ -821,8 +821,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
loop->dump_head(); loop->dump_head();
} }
#endif #endif
} else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { } else if ((cl != NULL) && (proj->_con == predicate_proj->_con) &&
assert(proj->_con == predicate_proj->_con, "must match"); loop->is_range_check_if(iff, this, invar)) {
// Range check for counted loops // Range check for counted loops
const Node* cmp = bol->in(1)->as_Cmp(); const Node* cmp = bol->in(1)->as_Cmp();
......
...@@ -666,7 +666,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr ...@@ -666,7 +666,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
alloc->dump(); alloc->dump();
else else
res->dump(); res->dump();
} else { } else if (alloc->_is_scalar_replaceable) {
tty->print("NotScalar (%s)", fail_eliminate); tty->print("NotScalar (%s)", fail_eliminate);
if (res == NULL) if (res == NULL)
alloc->dump(); alloc->dump();
...@@ -845,18 +845,14 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa ...@@ -845,18 +845,14 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
// to the allocated object with "sobj" // to the allocated object with "sobj"
int start = jvms->debug_start(); int start = jvms->debug_start();
int end = jvms->debug_end(); int end = jvms->debug_end();
for (int i = start; i < end; i++) { sfpt->replace_edges_in_range(res, sobj, start, end);
if (sfpt->in(i) == res) {
sfpt->set_req(i, sobj);
}
}
safepoints_done.append_if_missing(sfpt); // keep it for rollback safepoints_done.append_if_missing(sfpt); // keep it for rollback
} }
return true; return true;
} }
// Process users of eliminated allocation. // Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
Node* res = alloc->result_cast(); Node* res = alloc->result_cast();
if (res != NULL) { if (res != NULL) {
for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
...@@ -899,6 +895,17 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { ...@@ -899,6 +895,17 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
// Process other users of allocation's projections // Process other users of allocation's projections
// //
if (_resproj != NULL && _resproj->outcnt() != 0) { if (_resproj != NULL && _resproj->outcnt() != 0) {
// First disconnect stores captured by Initialize node.
// If Initialize node is eliminated first in the following code,
// it will kill such stores and DUIterator_Last will assert.
for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax); j < jmax; j++) {
Node *use = _resproj->fast_out(j);
if (use->is_AddP()) {
// raw memory addresses used only by the initialization
_igvn.replace_node(use, C->top());
--j; --jmax;
}
}
for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) { for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
Node *use = _resproj->last_out(j); Node *use = _resproj->last_out(j);
uint oc1 = _resproj->outcnt(); uint oc1 = _resproj->outcnt();
...@@ -923,9 +930,6 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { ...@@ -923,9 +930,6 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
#endif #endif
_igvn.replace_node(mem_proj, mem); _igvn.replace_node(mem_proj, mem);
} }
} else if (use->is_AddP()) {
// raw memory addresses used only by the initialization
_igvn.replace_node(use, C->top());
} else { } else {
assert(false, "only Initialize or AddP expected"); assert(false, "only Initialize or AddP expected");
} }
...@@ -953,8 +957,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { ...@@ -953,8 +957,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
} }
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
if (!EliminateAllocations || !alloc->_is_non_escaping) {
if (!EliminateAllocations || !alloc->_is_scalar_replaceable) { return false;
}
Node* klass = alloc->in(AllocateNode::KlassNode);
const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
Node* res = alloc->result_cast();
// Eliminate boxing allocations which are not used
// regardless scalar replacable status.
bool boxing_alloc = C->eliminate_boxing() &&
tklass->klass()->is_instance_klass() &&
tklass->klass()->as_instance_klass()->is_box_klass();
if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
return false; return false;
} }
...@@ -965,14 +979,22 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { ...@@ -965,14 +979,22 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
return false; return false;
} }
if (!alloc->_is_scalar_replaceable) {
assert(res == NULL, "sanity");
// We can only eliminate allocation if all debug info references
// are already replaced with SafePointScalarObject because
// we can't search for a fields value without instance_id.
if (safepoints.length() > 0) {
return false;
}
}
if (!scalar_replacement(alloc, safepoints)) { if (!scalar_replacement(alloc, safepoints)) {
return false; return false;
} }
CompileLog* log = C->log(); CompileLog* log = C->log();
if (log != NULL) { if (log != NULL) {
Node* klass = alloc->in(AllocateNode::KlassNode);
const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
log->head("eliminate_allocation type='%d'", log->head("eliminate_allocation type='%d'",
log->identify(tklass->klass())); log->identify(tklass->klass()));
JVMState* p = alloc->jvms(); JVMState* p = alloc->jvms();
...@@ -997,6 +1019,43 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { ...@@ -997,6 +1019,43 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
return true; return true;
} }
bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
// EA should remove all uses of non-escaping boxing node.
if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) {
return false;
}
extract_call_projections(boxing);
const TypeTuple* r = boxing->tf()->range();
assert(r->cnt() > TypeFunc::Parms, "sanity");
const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
assert(t != NULL, "sanity");
CompileLog* log = C->log();
if (log != NULL) {
log->head("eliminate_boxing type='%d'",
log->identify(t->klass()));
JVMState* p = boxing->jvms();
while (p != NULL) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
log->tail("eliminate_boxing");
}
process_users_of_allocation(boxing);
#ifndef PRODUCT
if (PrintEliminateAllocations) {
tty->print("++++ Eliminated: %d ", boxing->_idx);
boxing->method()->print_short_name(tty);
tty->cr();
}
#endif
return true;
}
//---------------------------set_eden_pointers------------------------- //---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) { void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
...@@ -2384,6 +2443,9 @@ void PhaseMacroExpand::eliminate_macro_nodes() { ...@@ -2384,6 +2443,9 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
case Node::Class_AllocateArray: case Node::Class_AllocateArray:
success = eliminate_allocate_node(n->as_Allocate()); success = eliminate_allocate_node(n->as_Allocate());
break; break;
case Node::Class_CallStaticJava:
success = eliminate_boxing_node(n->as_CallStaticJava());
break;
case Node::Class_Lock: case Node::Class_Lock:
case Node::Class_Unlock: case Node::Class_Unlock:
assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
...@@ -2424,6 +2486,11 @@ bool PhaseMacroExpand::expand_macro_nodes() { ...@@ -2424,6 +2486,11 @@ bool PhaseMacroExpand::expand_macro_nodes() {
C->remove_macro_node(n); C->remove_macro_node(n);
_igvn._worklist.push(n); _igvn._worklist.push(n);
success = true; success = true;
} else if (n->Opcode() == Op_CallStaticJava) {
// Remove it from macro list and put on IGVN worklist to optimize.
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
_igvn.replace_node(n, n->in(1)); _igvn.replace_node(n, n->in(1));
success = true; success = true;
......
...@@ -86,10 +86,11 @@ private: ...@@ -86,10 +86,11 @@ private:
Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc); Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc);
Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level); Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level);
bool eliminate_boxing_node(CallStaticJavaNode *boxing);
bool eliminate_allocate_node(AllocateNode *alloc); bool eliminate_allocate_node(AllocateNode *alloc);
bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints); bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints);
bool scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints_done); bool scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints_done);
void process_users_of_allocation(AllocateNode *alloc); void process_users_of_allocation(CallNode *alloc);
void eliminate_card_mark(Node *cm); void eliminate_card_mark(Node *cm);
void mark_eliminated_box(Node* box, Node* obj); void mark_eliminated_box(Node* box, Node* obj);
......
此差异已折叠。
...@@ -75,8 +75,8 @@ public: ...@@ -75,8 +75,8 @@ public:
PhaseTransform* phase); PhaseTransform* phase);
static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
// This one should probably be a phase-specific function: // This one should probably be a phase-specific function:
static bool all_controls_dominate(Node* dom, Node* sub); static bool all_controls_dominate(Node* dom, Node* sub);
...@@ -1099,7 +1099,7 @@ public: ...@@ -1099,7 +1099,7 @@ public:
Node* make_raw_address(intptr_t offset, PhaseTransform* phase); Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
bool detect_init_independence(Node* n, bool st_is_pinned, int& count); bool detect_init_independence(Node* n, int& count);
void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
PhaseGVN* phase); PhaseGVN* phase);
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "opto/callnode.hpp"
#include "opto/matcher.hpp" #include "opto/matcher.hpp"
#include "opto/multnode.hpp" #include "opto/multnode.hpp"
#include "opto/opcodes.hpp" #include "opto/opcodes.hpp"
...@@ -73,13 +74,26 @@ bool ProjNode::is_CFG() const { ...@@ -73,13 +74,26 @@ bool ProjNode::is_CFG() const {
return (_con == TypeFunc::Control && def->is_CFG()); return (_con == TypeFunc::Control && def->is_CFG());
} }
const Type* ProjNode::proj_type(const Type* t) const {
if (t == Type::TOP) {
return Type::TOP;
}
if (t == Type::BOTTOM) {
return Type::BOTTOM;
}
t = t->is_tuple()->field_at(_con);
Node* n = in(0);
if ((_con == TypeFunc::Parms) &&
n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
// The result of autoboxing is always non-null on normal path.
t = t->join(TypePtr::NOTNULL);
}
return t;
}
const Type *ProjNode::bottom_type() const { const Type *ProjNode::bottom_type() const {
if (in(0) == NULL) return Type::TOP; if (in(0) == NULL) return Type::TOP;
const Type *tb = in(0)->bottom_type(); return proj_type(in(0)->bottom_type());
if( tb == Type::TOP ) return Type::TOP;
if( tb == Type::BOTTOM ) return Type::BOTTOM;
const TypeTuple *t = tb->is_tuple();
return t->field_at(_con);
} }
const TypePtr *ProjNode::adr_type() const { const TypePtr *ProjNode::adr_type() const {
...@@ -115,11 +129,8 @@ void ProjNode::check_con() const { ...@@ -115,11 +129,8 @@ void ProjNode::check_con() const {
//------------------------------Value------------------------------------------ //------------------------------Value------------------------------------------
const Type *ProjNode::Value( PhaseTransform *phase ) const { const Type *ProjNode::Value( PhaseTransform *phase ) const {
if( !in(0) ) return Type::TOP; if (in(0) == NULL) return Type::TOP;
const Type *t = phase->type(in(0)); return proj_type(phase->type(in(0)));
if( t == Type::TOP ) return t;
if( t == Type::BOTTOM ) return t;
return t->is_tuple()->field_at(_con);
} }
//------------------------------out_RegMask------------------------------------ //------------------------------out_RegMask------------------------------------
......
...@@ -60,6 +60,7 @@ protected: ...@@ -60,6 +60,7 @@ protected:
virtual uint cmp( const Node &n ) const; virtual uint cmp( const Node &n ) const;
virtual uint size_of() const; virtual uint size_of() const;
void check_con() const; // Called from constructor. void check_con() const; // Called from constructor.
const Type* proj_type(const Type* t) const;
public: public:
ProjNode( Node *src, uint con, bool io_use = false ) ProjNode( Node *src, uint con, bool io_use = false )
...@@ -83,6 +84,7 @@ public: ...@@ -83,6 +84,7 @@ public:
virtual const Type *Value( PhaseTransform *phase ) const; virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const; virtual uint ideal_reg() const;
virtual const RegMask &out_RegMask() const; virtual const RegMask &out_RegMask() const;
#ifndef PRODUCT #ifndef PRODUCT
virtual void dump_spec(outputStream *st) const; virtual void dump_spec(outputStream *st) const;
#endif #endif
......
...@@ -67,7 +67,8 @@ void Node::verify_construction() { ...@@ -67,7 +67,8 @@ void Node::verify_construction() {
} }
Compile::set_debug_idx(new_debug_idx); Compile::set_debug_idx(new_debug_idx);
set_debug_idx( new_debug_idx ); set_debug_idx( new_debug_idx );
assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX"); assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded limit");
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) { if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx); tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
BREAKPOINT; BREAKPOINT;
...@@ -471,9 +472,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, ...@@ -471,9 +472,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
//------------------------------clone------------------------------------------ //------------------------------clone------------------------------------------
// Clone a Node. // Clone a Node.
Node *Node::clone() const { Node *Node::clone() const {
Compile *compile = Compile::current(); Compile* C = Compile::current();
uint s = size_of(); // Size of inherited Node uint s = size_of(); // Size of inherited Node
Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*)); Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s); Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
// Set the new input pointer array // Set the new input pointer array
n->_in = (Node**)(((char*)n)+s); n->_in = (Node**)(((char*)n)+s);
...@@ -492,18 +493,18 @@ Node *Node::clone() const { ...@@ -492,18 +493,18 @@ Node *Node::clone() const {
if (x != NULL) x->add_out(n); if (x != NULL) x->add_out(n);
} }
if (is_macro()) if (is_macro())
compile->add_macro_node(n); C->add_macro_node(n);
if (is_expensive()) if (is_expensive())
compile->add_expensive_node(n); C->add_expensive_node(n);
n->set_idx(compile->next_unique()); // Get new unique index as well n->set_idx(C->next_unique()); // Get new unique index as well
debug_only( n->verify_construction() ); debug_only( n->verify_construction() );
NOT_PRODUCT(nodes_created++); NOT_PRODUCT(nodes_created++);
// Do not patch over the debug_idx of a clone, because it makes it // Do not patch over the debug_idx of a clone, because it makes it
// impossible to break on the clone's moment of creation. // impossible to break on the clone's moment of creation.
//debug_only( n->set_debug_idx( debug_idx() ) ); //debug_only( n->set_debug_idx( debug_idx() ) );
compile->copy_node_notes_to(n, (Node*) this); C->copy_node_notes_to(n, (Node*) this);
// MachNode clone // MachNode clone
uint nopnds; uint nopnds;
...@@ -518,13 +519,12 @@ Node *Node::clone() const { ...@@ -518,13 +519,12 @@ Node *Node::clone() const {
(const void*)(&mthis->_opnds), 1)); (const void*)(&mthis->_opnds), 1));
mach->_opnds = to; mach->_opnds = to;
for ( uint i = 0; i < nopnds; ++i ) { for ( uint i = 0; i < nopnds; ++i ) {
to[i] = from[i]->clone(compile); to[i] = from[i]->clone(C);
} }
} }
// cloning CallNode may need to clone JVMState // cloning CallNode may need to clone JVMState
if (n->is_Call()) { if (n->is_Call()) {
CallNode *call = n->as_Call(); n->as_Call()->clone_jvms(C);
call->clone_jvms();
} }
return n; // Return the clone return n; // Return the clone
} }
...@@ -811,6 +811,21 @@ int Node::replace_edge(Node* old, Node* neww) { ...@@ -811,6 +811,21 @@ int Node::replace_edge(Node* old, Node* neww) {
return nrep; return nrep;
} }
/**
* Replace input edges in the range pointing to 'old' node.
*/
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
if (old == neww) return 0; // nothing to do
uint nrep = 0;
for (int i = start; i < end; i++) {
if (in(i) == old) {
set_req(i, neww);
nrep++;
}
}
return nrep;
}
//-------------------------disconnect_inputs----------------------------------- //-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges. // NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this' // Return the number of edges between 'n' and 'this'
......
...@@ -410,6 +410,7 @@ protected: ...@@ -410,6 +410,7 @@ protected:
// Find first occurrence of n among my edges: // Find first occurrence of n among my edges:
int find_edge(Node* n); int find_edge(Node* n);
int replace_edge(Node* old, Node* neww); int replace_edge(Node* old, Node* neww);
int replace_edges_in_range(Node* old, Node* neww, int start, int end);
// NULL out all inputs to eliminate incoming Def-Use edges. // NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this' // Return the number of edges between 'n' and 'this'
int disconnect_inputs(Node *n, Compile *c); int disconnect_inputs(Node *n, Compile *c);
......
...@@ -330,6 +330,7 @@ class Parse : public GraphKit { ...@@ -330,6 +330,7 @@ class Parse : public GraphKit {
bool _wrote_final; // Did we write a final field? bool _wrote_final; // Did we write a final field?
bool _count_invocations; // update and test invocation counter bool _count_invocations; // update and test invocation counter
bool _method_data_update; // update method data oop bool _method_data_update; // update method data oop
Node* _alloc_with_final; // An allocation node with final field
// Variables which track Java semantics during bytecode parsing: // Variables which track Java semantics during bytecode parsing:
...@@ -370,6 +371,11 @@ class Parse : public GraphKit { ...@@ -370,6 +371,11 @@ class Parse : public GraphKit {
void set_wrote_final(bool z) { _wrote_final = z; } void set_wrote_final(bool z) { _wrote_final = z; }
bool count_invocations() const { return _count_invocations; } bool count_invocations() const { return _count_invocations; }
bool method_data_update() const { return _method_data_update; } bool method_data_update() const { return _method_data_update; }
Node* alloc_with_final() const { return _alloc_with_final; }
void set_alloc_with_final(Node* n) {
assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
_alloc_with_final = n;
}
Block* block() const { return _block; } Block* block() const { return _block; }
ciBytecodeStream& iter() { return _iter; } ciBytecodeStream& iter() { return _iter; }
...@@ -512,7 +518,7 @@ class Parse : public GraphKit { ...@@ -512,7 +518,7 @@ class Parse : public GraphKit {
// loading from a constant field or the constant pool // loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs) // returns false if push failed (non-perm field constants only, not ldcs)
bool push_constant(ciConstant con, bool require_constant = false); bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false);
// implementation of object creation bytecodes // implementation of object creation bytecodes
void emit_guard_for_new(ciInstanceKlass* klass); void emit_guard_for_new(ciInstanceKlass* klass);
......
...@@ -390,6 +390,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) ...@@ -390,6 +390,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
_expected_uses = expected_uses; _expected_uses = expected_uses;
_depth = 1 + (caller->has_method() ? caller->depth() : 0); _depth = 1 + (caller->has_method() ? caller->depth() : 0);
_wrote_final = false; _wrote_final = false;
_alloc_with_final = NULL;
_entry_bci = InvocationEntryBci; _entry_bci = InvocationEntryBci;
_tf = NULL; _tf = NULL;
_block = NULL; _block = NULL;
...@@ -723,6 +724,8 @@ void Parse::build_exits() { ...@@ -723,6 +724,8 @@ void Parse::build_exits() {
// Note: iophi and memphi are not transformed until do_exits. // Note: iophi and memphi are not transformed until do_exits.
Node* iophi = new (C) PhiNode(region, Type::ABIO); Node* iophi = new (C) PhiNode(region, Type::ABIO);
Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
gvn().set_type_bottom(iophi);
gvn().set_type_bottom(memphi);
_exits.set_i_o(iophi); _exits.set_i_o(iophi);
_exits.set_all_memory(memphi); _exits.set_all_memory(memphi);
...@@ -738,6 +741,7 @@ void Parse::build_exits() { ...@@ -738,6 +741,7 @@ void Parse::build_exits() {
} }
int ret_size = type2size[ret_type->basic_type()]; int ret_size = type2size[ret_type->basic_type()];
Node* ret_phi = new (C) PhiNode(region, ret_type); Node* ret_phi = new (C) PhiNode(region, ret_type);
gvn().set_type_bottom(ret_phi);
_exits.ensure_stack(ret_size); _exits.ensure_stack(ret_size);
assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range"); assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
assert(method()->return_type()->size() == ret_size, "tf agrees w/ method"); assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
...@@ -917,7 +921,7 @@ void Parse::do_exits() { ...@@ -917,7 +921,7 @@ void Parse::do_exits() {
// such unusual early publications. But no barrier is needed on // such unusual early publications. But no barrier is needed on
// exceptional returns, since they cannot publish normally. // exceptional returns, since they cannot publish normally.
// //
_exits.insert_mem_bar(Op_MemBarRelease); _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
#ifndef PRODUCT #ifndef PRODUCT
if (PrintOpto && (Verbose || WizardMode)) { if (PrintOpto && (Verbose || WizardMode)) {
method()->print_name(); method()->print_name();
......
...@@ -987,7 +987,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) { ...@@ -987,7 +987,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
uncommon_trap(Deoptimization::Reason_unreached, uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret, Deoptimization::Action_reinterpret,
NULL, "cold"); NULL, "cold");
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed // Mark the successor blocks as parsed
branch_block->next_path_num(); branch_block->next_path_num();
next_block->next_path_num(); next_block->next_path_num();
...@@ -1012,7 +1012,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) { ...@@ -1012,7 +1012,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
if (stopped()) { // Path is dead? if (stopped()) { // Path is dead?
explicit_null_checks_elided++; explicit_null_checks_elided++;
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor block as parsed // Mark the successor block as parsed
branch_block->next_path_num(); branch_block->next_path_num();
} }
...@@ -1032,7 +1032,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) { ...@@ -1032,7 +1032,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
if (stopped()) { // Path is dead? if (stopped()) { // Path is dead?
explicit_null_checks_elided++; explicit_null_checks_elided++;
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor block as parsed // Mark the successor block as parsed
next_block->next_path_num(); next_block->next_path_num();
} }
...@@ -1069,7 +1069,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) { ...@@ -1069,7 +1069,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
uncommon_trap(Deoptimization::Reason_unreached, uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret, Deoptimization::Action_reinterpret,
NULL, "cold"); NULL, "cold");
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed // Mark the successor blocks as parsed
branch_block->next_path_num(); branch_block->next_path_num();
next_block->next_path_num(); next_block->next_path_num();
...@@ -1135,7 +1135,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) { ...@@ -1135,7 +1135,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
set_control(taken_branch); set_control(taken_branch);
if (stopped()) { if (stopped()) {
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor block as parsed // Mark the successor block as parsed
branch_block->next_path_num(); branch_block->next_path_num();
} }
...@@ -1154,7 +1154,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) { ...@@ -1154,7 +1154,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
// Branch not taken. // Branch not taken.
if (stopped()) { if (stopped()) {
if (EliminateAutoBox) { if (C->eliminate_boxing()) {
// Mark the successor block as parsed // Mark the successor block as parsed
next_block->next_path_num(); next_block->next_path_num();
} }
......
...@@ -150,6 +150,23 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { ...@@ -150,6 +150,23 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
// final field // final field
if (field->is_static()) { if (field->is_static()) {
// final static field // final static field
if (C->eliminate_boxing()) {
// The pointers in the autobox arrays are always non-null.
ciSymbol* klass_name = field->holder()->name();
if (field->name() == ciSymbol::cache_field_name() &&
field->holder()->uses_default_loader() &&
(klass_name == ciSymbol::java_lang_Character_CharacterCache() ||
klass_name == ciSymbol::java_lang_Byte_ByteCache() ||
klass_name == ciSymbol::java_lang_Short_ShortCache() ||
klass_name == ciSymbol::java_lang_Integer_IntegerCache() ||
klass_name == ciSymbol::java_lang_Long_LongCache())) {
bool require_const = true;
bool autobox_cache = true;
if (push_constant(field->constant_value(), require_const, autobox_cache)) {
return;
}
}
}
if (push_constant(field->constant_value())) if (push_constant(field->constant_value()))
return; return;
} }
...@@ -304,11 +321,18 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { ...@@ -304,11 +321,18 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// out of the constructor. // out of the constructor.
if (is_field && field->is_final()) { if (is_field && field->is_final()) {
set_wrote_final(true); set_wrote_final(true);
// Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor.
if (C->eliminate_boxing() &&
adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
set_alloc_with_final(obj);
}
} }
} }
bool Parse::push_constant(ciConstant constant, bool require_constant) { bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache) {
switch (constant.basic_type()) { switch (constant.basic_type()) {
case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break; case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break;
case T_INT: push( intcon(constant.as_int()) ); break; case T_INT: push( intcon(constant.as_int()) ); break;
...@@ -329,7 +353,7 @@ bool Parse::push_constant(ciConstant constant, bool require_constant) { ...@@ -329,7 +353,7 @@ bool Parse::push_constant(ciConstant constant, bool require_constant) {
push( zerocon(T_OBJECT) ); push( zerocon(T_OBJECT) );
break; break;
} else if (require_constant || oop_constant->should_be_constant()) { } else if (require_constant || oop_constant->should_be_constant()) {
push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) ); push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache)) );
break; break;
} else { } else {
// we cannot inline the oop, but we can use it later to narrow a type // we cannot inline the oop, but we can use it later to narrow a type
......
...@@ -284,6 +284,11 @@ void Parse::do_new() { ...@@ -284,6 +284,11 @@ void Parse::do_new() {
klass == C->env()->StringBuffer_klass())) { klass == C->env()->StringBuffer_klass())) {
C->set_has_stringbuilder(true); C->set_has_stringbuilder(true);
} }
// Keep track of boxed values for EliminateAutoBox optimizations.
if (C->eliminate_boxing() && klass->is_box_klass()) {
C->set_has_boxed_value(true);
}
} }
#ifndef PRODUCT #ifndef PRODUCT
......
...@@ -64,6 +64,7 @@ elapsedTimer Phase::_t_idealLoopVerify; ...@@ -64,6 +64,7 @@ elapsedTimer Phase::_t_idealLoopVerify;
// Subtimers for _t_optimizer // Subtimers for _t_optimizer
elapsedTimer Phase::_t_iterGVN; elapsedTimer Phase::_t_iterGVN;
elapsedTimer Phase::_t_iterGVN2; elapsedTimer Phase::_t_iterGVN2;
elapsedTimer Phase::_t_incrInline;
// Subtimers for _t_registerAllocation // Subtimers for _t_registerAllocation
elapsedTimer Phase::_t_ctorChaitin; elapsedTimer Phase::_t_ctorChaitin;
...@@ -110,6 +111,7 @@ void Phase::print_timers() { ...@@ -110,6 +111,7 @@ void Phase::print_timers() {
tty->print_cr (" macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds()); tty->print_cr (" macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds());
} }
tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds()); tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds());
tty->print_cr (" incrInline : %3.3f sec", Phase::_t_incrInline.seconds());
tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds()); tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds());
tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds()); tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds()); tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds());
......
...@@ -100,6 +100,7 @@ protected: ...@@ -100,6 +100,7 @@ protected:
// Subtimers for _t_optimizer // Subtimers for _t_optimizer
static elapsedTimer _t_iterGVN; static elapsedTimer _t_iterGVN;
static elapsedTimer _t_iterGVN2; static elapsedTimer _t_iterGVN2;
static elapsedTimer _t_incrInline;
// Subtimers for _t_registerAllocation // Subtimers for _t_registerAllocation
static elapsedTimer _t_ctorChaitin; static elapsedTimer _t_ctorChaitin;
......
...@@ -882,7 +882,7 @@ void PhaseIterGVN::optimize() { ...@@ -882,7 +882,7 @@ void PhaseIterGVN::optimize() {
return; return;
} }
Node *n = _worklist.pop(); Node *n = _worklist.pop();
if (++loop_count >= K * C->unique()) { if (++loop_count >= K * C->live_nodes()) {
debug_only(n->dump(4);) debug_only(n->dump(4);)
assert(false, "infinite loop in PhaseIterGVN::optimize"); assert(false, "infinite loop in PhaseIterGVN::optimize");
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize"); C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
......
...@@ -2372,7 +2372,12 @@ TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int ...@@ -2372,7 +2372,12 @@ TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int
_klass_is_exact(xk), _klass_is_exact(xk),
_is_ptr_to_narrowoop(false), _is_ptr_to_narrowoop(false),
_is_ptr_to_narrowklass(false), _is_ptr_to_narrowklass(false),
_is_ptr_to_boxed_value(false),
_instance_id(instance_id) { _instance_id(instance_id) {
if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
(offset > 0) && xk && (k != 0) && k->is_instance_klass()) {
_is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset);
}
#ifdef _LP64 #ifdef _LP64
if (_offset != 0) { if (_offset != 0) {
if (_offset == oopDesc::klass_offset_in_bytes()) { if (_offset == oopDesc::klass_offset_in_bytes()) {
...@@ -2613,44 +2618,50 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_ ...@@ -2613,44 +2618,50 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_
//------------------------------make_from_constant----------------------------- //------------------------------make_from_constant-----------------------------
// Make a java pointer from an oop constant // Make a java pointer from an oop constant
const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) { const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o,
assert(!o->is_null_object(), "null object not yet handled here."); bool require_constant,
ciKlass* klass = o->klass(); bool is_autobox_cache) {
if (klass->is_instance_klass()) { assert(!o->is_null_object(), "null object not yet handled here.");
// Element is an instance ciKlass* klass = o->klass();
if (require_constant) { if (klass->is_instance_klass()) {
if (!o->can_be_constant()) return NULL; // Element is an instance
} else if (!o->should_be_constant()) { if (require_constant) {
return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0); if (!o->can_be_constant()) return NULL;
} } else if (!o->should_be_constant()) {
return TypeInstPtr::make(o); return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
} else if (klass->is_obj_array_klass()) { }
// Element is an object array. Recursively call ourself. return TypeInstPtr::make(o);
const Type *etype = } else if (klass->is_obj_array_klass()) {
// Element is an object array. Recursively call ourself.
const TypeOopPtr *etype =
TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass()); TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass());
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); if (is_autobox_cache) {
// We used to pass NotNull in here, asserting that the sub-arrays // The pointers in the autobox arrays are always non-null.
// are all not-null. This is not true in generally, as code can etype = etype->cast_to_ptr_type(TypePtr::NotNull)->is_oopptr();
// slam NULLs down in the subarrays. }
if (require_constant) { const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
if (!o->can_be_constant()) return NULL; // We used to pass NotNull in here, asserting that the sub-arrays
} else if (!o->should_be_constant()) { // are all not-null. This is not true in generally, as code can
return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); // slam NULLs down in the subarrays.
} if (require_constant) {
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); if (!o->can_be_constant()) return NULL;
} else if (!o->should_be_constant()) {
return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
}
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, is_autobox_cache);
return arr; return arr;
} else if (klass->is_type_array_klass()) { } else if (klass->is_type_array_klass()) {
// Element is an typeArray // Element is an typeArray
const Type* etype = const Type* etype =
(Type*)get_const_basic_type(klass->as_type_array_klass()->element_type()); (Type*)get_const_basic_type(klass->as_type_array_klass()->element_type());
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
// We used to pass NotNull in here, asserting that the array pointer // We used to pass NotNull in here, asserting that the array pointer
// is not-null. That was not true in general. // is not-null. That was not true in general.
if (require_constant) { if (require_constant) {
if (!o->can_be_constant()) return NULL; if (!o->can_be_constant()) return NULL;
} else if (!o->should_be_constant()) { } else if (!o->should_be_constant()) {
return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0); return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
} }
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
return arr; return arr;
} }
...@@ -2856,6 +2867,28 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr, ...@@ -2856,6 +2867,28 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr,
return result; return result;
} }
/**
* Create constant type for a constant boxed value
*/
const Type* TypeInstPtr::get_const_boxed_value() const {
assert(is_ptr_to_boxed_value(), "should be called only for boxed value");
assert((const_oop() != NULL), "should be called only for constant object");
ciConstant constant = const_oop()->as_instance()->field_value_by_offset(offset());
BasicType bt = constant.basic_type();
switch (bt) {
case T_BOOLEAN: return TypeInt::make(constant.as_boolean());
case T_INT: return TypeInt::make(constant.as_int());
case T_CHAR: return TypeInt::make(constant.as_char());
case T_BYTE: return TypeInt::make(constant.as_byte());
case T_SHORT: return TypeInt::make(constant.as_short());
case T_FLOAT: return TypeF::make(constant.as_float());
case T_DOUBLE: return TypeD::make(constant.as_double());
case T_LONG: return TypeLong::make(constant.as_long());
default: break;
}
fatal(err_msg_res("Invalid boxed value type '%s'", type2name(bt)));
return NULL;
}
//------------------------------cast_to_ptr_type------------------------------- //------------------------------cast_to_ptr_type-------------------------------
const Type *TypeInstPtr::cast_to_ptr_type(PTR ptr) const { const Type *TypeInstPtr::cast_to_ptr_type(PTR ptr) const {
...@@ -3330,18 +3363,18 @@ const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, boo ...@@ -3330,18 +3363,18 @@ const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, boo
if (!xk) xk = ary->ary_must_be_exact(); if (!xk) xk = ary->ary_must_be_exact();
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant); if (!UseExactTypes) xk = (ptr == Constant);
return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id))->hashcons(); return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false))->hashcons();
} }
//------------------------------make------------------------------------------- //------------------------------make-------------------------------------------
const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) { const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, bool is_autobox_cache) {
assert(!(k == NULL && ary->_elem->isa_int()), assert(!(k == NULL && ary->_elem->isa_int()),
"integral arrays must be pre-equipped with a class"); "integral arrays must be pre-equipped with a class");
assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" ); assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
if (!xk) xk = (o != NULL) || ary->ary_must_be_exact(); if (!xk) xk = (o != NULL) || ary->ary_must_be_exact();
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant); if (!UseExactTypes) xk = (ptr == Constant);
return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id))->hashcons(); return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache))->hashcons();
} }
//------------------------------cast_to_ptr_type------------------------------- //------------------------------cast_to_ptr_type-------------------------------
...@@ -3397,8 +3430,20 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const { ...@@ -3397,8 +3430,20 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
jint max_hi = max_array_length(elem()->basic_type()); jint max_hi = max_array_length(elem()->basic_type());
//if (index_not_size) --max_hi; // type of a valid array index, FTR //if (index_not_size) --max_hi; // type of a valid array index, FTR
bool chg = false; bool chg = false;
if (lo < min_lo) { lo = min_lo; chg = true; } if (lo < min_lo) {
if (hi > max_hi) { hi = max_hi; chg = true; } lo = min_lo;
if (size->is_con()) {
hi = lo;
}
chg = true;
}
if (hi > max_hi) {
hi = max_hi;
if (size->is_con()) {
lo = hi;
}
chg = true;
}
// Negative length arrays will produce weird intermediate dead fast-path code // Negative length arrays will produce weird intermediate dead fast-path code
if (lo > hi) if (lo > hi)
return TypeInt::ZERO; return TypeInt::ZERO;
...@@ -3630,7 +3675,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const { ...@@ -3630,7 +3675,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
//------------------------------xdual------------------------------------------ //------------------------------xdual------------------------------------------
// Dual: compute field-by-field dual // Dual: compute field-by-field dual
const Type *TypeAryPtr::xdual() const { const Type *TypeAryPtr::xdual() const {
return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id() ); return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache() );
} }
//----------------------interface_vs_oop--------------------------------------- //----------------------interface_vs_oop---------------------------------------
......
...@@ -234,6 +234,9 @@ public: ...@@ -234,6 +234,9 @@ public:
bool is_ptr_to_narrowoop() const; bool is_ptr_to_narrowoop() const;
bool is_ptr_to_narrowklass() const; bool is_ptr_to_narrowklass() const;
bool is_ptr_to_boxing_obj() const;
// Convenience access // Convenience access
float getf() const; float getf() const;
double getd() const; double getd() const;
...@@ -794,6 +797,7 @@ protected: ...@@ -794,6 +797,7 @@ protected:
bool _klass_is_exact; bool _klass_is_exact;
bool _is_ptr_to_narrowoop; bool _is_ptr_to_narrowoop;
bool _is_ptr_to_narrowklass; bool _is_ptr_to_narrowklass;
bool _is_ptr_to_boxed_value;
// If not InstanceTop or InstanceBot, indicates that this is // If not InstanceTop or InstanceBot, indicates that this is
// a particular instance of this type which is distinct. // a particular instance of this type which is distinct.
...@@ -826,7 +830,9 @@ public: ...@@ -826,7 +830,9 @@ public:
// If the object cannot be rendered as a constant, // If the object cannot be rendered as a constant,
// may return a non-singleton type. // may return a non-singleton type.
// If require_constant, produce a NULL if a singleton is not possible. // If require_constant, produce a NULL if a singleton is not possible.
static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false); static const TypeOopPtr* make_from_constant(ciObject* o,
bool require_constant = false,
bool not_null_elements = false);
// Make a generic (unclassed) pointer to an oop. // Make a generic (unclassed) pointer to an oop.
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id); static const TypeOopPtr* make(PTR ptr, int offset, int instance_id);
...@@ -839,7 +845,7 @@ public: ...@@ -839,7 +845,7 @@ public:
// compressed oop references. // compressed oop references.
bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; } bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; }
bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; } bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; }
bool is_ptr_to_boxed_value() const { return _is_ptr_to_boxed_value; }
bool is_known_instance() const { return _instance_id > 0; } bool is_known_instance() const { return _instance_id > 0; }
int instance_id() const { return _instance_id; } int instance_id() const { return _instance_id; }
bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
...@@ -912,6 +918,9 @@ class TypeInstPtr : public TypeOopPtr { ...@@ -912,6 +918,9 @@ class TypeInstPtr : public TypeOopPtr {
// Make a pointer to an oop. // Make a pointer to an oop.
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot ); static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot );
/** Create constant type for a constant boxed value */
const Type* get_const_boxed_value() const;
// If this is a java.lang.Class constant, return the type for it or NULL. // If this is a java.lang.Class constant, return the type for it or NULL.
// Pass to Type::get_const_type to turn it to a type, which will usually // Pass to Type::get_const_type to turn it to a type, which will usually
// be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc. // be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc.
...@@ -943,7 +952,12 @@ class TypeInstPtr : public TypeOopPtr { ...@@ -943,7 +952,12 @@ class TypeInstPtr : public TypeOopPtr {
//------------------------------TypeAryPtr------------------------------------- //------------------------------TypeAryPtr-------------------------------------
// Class of Java array pointers // Class of Java array pointers
class TypeAryPtr : public TypeOopPtr { class TypeAryPtr : public TypeOopPtr {
TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), _ary(ary) { TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
int offset, int instance_id, bool is_autobox_cache )
: TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id),
_ary(ary),
_is_autobox_cache(is_autobox_cache)
{
#ifdef ASSERT #ifdef ASSERT
if (k != NULL) { if (k != NULL) {
// Verify that specified klass and TypeAryPtr::klass() follow the same rules. // Verify that specified klass and TypeAryPtr::klass() follow the same rules.
...@@ -964,6 +978,7 @@ class TypeAryPtr : public TypeOopPtr { ...@@ -964,6 +978,7 @@ class TypeAryPtr : public TypeOopPtr {
virtual bool eq( const Type *t ) const; virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing virtual int hash() const; // Type specific hashing
const TypeAry *_ary; // Array we point into const TypeAry *_ary; // Array we point into
const bool _is_autobox_cache;
ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const; ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const;
...@@ -974,9 +989,11 @@ public: ...@@ -974,9 +989,11 @@ public:
const Type* elem() const { return _ary->_elem; } const Type* elem() const { return _ary->_elem; }
const TypeInt* size() const { return _ary->_size; } const TypeInt* size() const { return _ary->_size; }
bool is_autobox_cache() const { return _is_autobox_cache; }
static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
// Constant pointer to array // Constant pointer to array
static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, bool is_autobox_cache = false);
// Return a 'ptr' version of this type // Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const; virtual const Type *cast_to_ptr_type(PTR ptr) const;
...@@ -1504,6 +1521,13 @@ inline bool Type::is_floatingpoint() const { ...@@ -1504,6 +1521,13 @@ inline bool Type::is_floatingpoint() const {
return false; return false;
} }
inline bool Type::is_ptr_to_boxing_obj() const {
const TypeInstPtr* tp = isa_instptr();
return (tp != NULL) && (tp->offset() == 0) &&
tp->klass()->is_instance_klass() &&
tp->klass()->as_instance_klass()->is_box_klass();
}
// =============================================================== // ===============================================================
// Things that need to be 64-bits in the 64-bit build but // Things that need to be 64-bits in the 64-bit build but
......
...@@ -1089,6 +1089,10 @@ void Arguments::set_tiered_flags() { ...@@ -1089,6 +1089,10 @@ void Arguments::set_tiered_flags() {
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) { if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5); FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
} }
if (!UseInterpreter) { // -Xcomp
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
} }
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
...@@ -1661,6 +1665,20 @@ void Arguments::set_bytecode_flags() { ...@@ -1661,6 +1665,20 @@ void Arguments::set_bytecode_flags() {
// Aggressive optimization flags -XX:+AggressiveOpts // Aggressive optimization flags -XX:+AggressiveOpts
void Arguments::set_aggressive_opts_flags() { void Arguments::set_aggressive_opts_flags() {
#ifdef COMPILER2 #ifdef COMPILER2
if (AggressiveUnboxing) {
if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
FLAG_SET_DEFAULT(EliminateAutoBox, true);
} else if (!EliminateAutoBox) {
// warning("AggressiveUnboxing is disabled because EliminateAutoBox is disabled");
AggressiveUnboxing = false;
}
if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) {
FLAG_SET_DEFAULT(DoEscapeAnalysis, true);
} else if (!DoEscapeAnalysis) {
// warning("AggressiveUnboxing is disabled because DoEscapeAnalysis is disabled");
AggressiveUnboxing = false;
}
}
if (AggressiveOpts || !FLAG_IS_DEFAULT(AutoBoxCacheMax)) { if (AggressiveOpts || !FLAG_IS_DEFAULT(AutoBoxCacheMax)) {
if (FLAG_IS_DEFAULT(EliminateAutoBox)) { if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
FLAG_SET_DEFAULT(EliminateAutoBox, true); FLAG_SET_DEFAULT(EliminateAutoBox, true);
......
...@@ -1057,6 +1057,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary; ...@@ -1057,6 +1057,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
c2_nonstatic_field(Compile, _save_argument_registers, const bool) \ c2_nonstatic_field(Compile, _save_argument_registers, const bool) \
c2_nonstatic_field(Compile, _subsume_loads, const bool) \ c2_nonstatic_field(Compile, _subsume_loads, const bool) \
c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \ c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \
c2_nonstatic_field(Compile, _eliminate_boxing, const bool) \
c2_nonstatic_field(Compile, _ilt, InlineTree*) \ c2_nonstatic_field(Compile, _ilt, InlineTree*) \
\ \
c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \ c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 6934604
* @summary enable parts of EliminateAutoBox by default
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox TestByteBoxing
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
* -XX:CompileCommand=exclude,TestByteBoxing.dummy -XX:CompileCommand=exclude,TestByteBoxing.foo -XX:CompileCommand=exclude,TestByteBoxing.foob TestByteBoxing
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-EliminateAutoBox
* -XX:CompileCommand=exclude,TestByteBoxing.dummy -XX:CompileCommand=exclude,TestByteBoxing.foo -XX:CompileCommand=exclude,TestByteBoxing.foob TestByteBoxing
*
*/
public class TestByteBoxing {

    // Shared boxed constant. Note: deliberately created with the Byte
    // constructor (not valueOf) so it is NOT an identity-cached box.
    static final Byte ibc = new Byte((byte)1);

    //===============================================
    // Non-inlined methods to test deoptimization info
    // (excluded from compilation by CompileCommand in the @run lines above,
    //  so calls to them force the compiled callers to keep debug info alive).
    static void dummy() { }
    static byte foo(byte i) { return i; }
    static Byte foob(byte i) { return Byte.valueOf(i); }

    // "simple*" methods: box a value once and immediately unbox it on return.
    // Each variant produces the box differently: constructor, valueOf,
    // constant field, non-inlined factory, or a boxed parameter.

    // Box via constructor, unbox on return.
    static byte simple(byte i) {
        Byte ib = new Byte(i);
        return ib;
    }
    // Box via Byte.valueOf (cache-eligible), unbox on return.
    static byte simpleb(byte i) {
        Byte ib = Byte.valueOf(i);
        return ib;
    }
    // Read the constant box, unbox on return.
    static byte simplec() {
        Byte ib = ibc;
        return ib;
    }
    // Box produced by a non-inlined method, unbox on return.
    static byte simplef(byte i) {
        Byte ib = foob(i);
        return ib;
    }
    // Box passed in as a parameter, unbox on return.
    static byte simplep(Byte ib) {
        return ib;
    }

    // "*2" variants: two boxes combined by an unbox-add-cast.
    static byte simple2(byte i) {
        Byte ib1 = new Byte(i);
        Byte ib2 = new Byte((byte)(i+1));
        return (byte)(ib1 + ib2);
    }
    static byte simpleb2(byte i) {
        Byte ib1 = Byte.valueOf(i);
        Byte ib2 = Byte.valueOf((byte)(i+1));
        return (byte)(ib1 + ib2);
    }
    // Mixed: one constructor box, one valueOf box.
    static byte simplem2(byte i) {
        Byte ib1 = new Byte(i);
        Byte ib2 = Byte.valueOf((byte)(i+1));
        return (byte)(ib1 + ib2);
    }
    // Mixed: one parameter box, one valueOf box.
    static byte simplep2(byte i, Byte ib1) {
        Byte ib2 = Byte.valueOf((byte)(i+1));
        return (byte)(ib1 + ib2);
    }
    // Mixed: one constant box, one valueOf box.
    static byte simplec2(byte i) {
        Byte ib1 = ibc;
        Byte ib2 = Byte.valueOf((byte)(i+1));
        return (byte)(ib1 + ib2);
    }

    //===============================================
    // "test*" methods: the box is conditionally replaced on one branch,
    // so the unbox site sees a phi of differently-produced boxes.
    static byte test(byte i) {
        Byte ib = new Byte(i);
        if ((i&1) == 0)
            ib = (byte)(i+1);      // autoboxing (Byte.valueOf) on this path
        return ib;
    }
    static byte testb(byte i) {
        Byte ib = i;               // autoboxing
        if ((i&1) == 0)
            ib = (byte)(i+1);
        return ib;
    }
    static byte testm(byte i) {
        Byte ib = i;
        if ((i&1) == 0)
            ib = new Byte((byte)(i+1));  // constructor box on this path
        return ib;
    }
    static byte testp(byte i, Byte ib) {
        if ((i&1) == 0)
            ib = new Byte((byte)(i+1));
        return ib;
    }
    static byte testc(byte i) {
        Byte ib = ibc;
        if ((i&1) == 0)
            ib = new Byte((byte)(i+1));
        return ib;
    }

    // Two boxes, both conditionally replaced.
    static byte test2(byte i) {
        Byte ib1 = new Byte(i);
        Byte ib2 = new Byte((byte)(i+1));
        if ((i&1) == 0) {
            ib1 = new Byte((byte)(i+1));
            ib2 = new Byte((byte)(i+2));
        }
        return (byte)(ib1+ib2);
    }
    static byte testb2(byte i) {
        Byte ib1 = i;
        Byte ib2 = (byte)(i+1);
        if ((i&1) == 0) {
            ib1 = (byte)(i+1);
            ib2 = (byte)(i+2);
        }
        return (byte)(ib1 + ib2);
    }
    static byte testm2(byte i) {
        Byte ib1 = new Byte(i);
        Byte ib2 = (byte)(i+1);
        if ((i&1) == 0) {
            ib1 = new Byte((byte)(i+1));
            ib2 = (byte)(i+2);
        }
        return (byte)(ib1 + ib2);
    }
    static byte testp2(byte i, Byte ib1) {
        Byte ib2 = (byte)(i+1);
        if ((i&1) == 0) {
            ib1 = new Byte((byte)(i+1));
            ib2 = (byte)(i+2);
        }
        return (byte)(ib1 + ib2);
    }
    static byte testc2(byte i) {
        Byte ib1 = ibc;
        Byte ib2 = (byte)(i+1);
        if ((i&1) == 0) {
            ib1 = (byte)(ibc+1);   // unbox constant, re-box result
            ib2 = (byte)(i+2);
        }
        return (byte)(ib1 + ib2);
    }

    //===============================================
    // "sum*" methods: boxing/unboxing inside a loop over an array
    // (the for-each over byte[] with a Byte loop variable boxes each element).
    static byte sum(byte[] a) {
        byte result = 1;
        for (Byte i : a)
            result += i;
        return result;
    }
    static byte sumb(byte[] a) {
        Byte result = 1;
        for (Byte i : a)
            result = (byte)(result + i);
        return result;
    }
    static byte sumc(byte[] a) {
        Byte result = ibc;
        for (Byte i : a)
            result = (byte)(result + i);
        return result;
    }
    static byte sumf(byte[] a) {
        Byte result = foob((byte)1);
        for (Byte i : a)
            result = (byte)(result + i);
        return result;
    }
    static byte sump(byte[] a, Byte result) {
        for (Byte i : a)
            result = (byte)(result + i);
        return result;
    }

    // Two accumulators per loop.
    static byte sum2(byte[] a) {
        byte result1 = 1;
        byte result2 = 1;
        for (Byte i : a) {
            result1 += i;
            result2 += i + 1;
        }
        return (byte)(result1 + result2);
    }
    static byte sumb2(byte[] a) {
        Byte result1 = 1;
        Byte result2 = 1;
        for (Byte i : a) {
            result1 = (byte)(result1 + i);
            result2 = (byte)(result2 + i + 1);
        }
        return (byte)(result1 + result2);
    }
    static byte summ2(byte[] a) {
        Byte result1 = 1;
        Byte result2 = new Byte((byte)1);
        for (Byte i : a) {
            result1 = (byte)(result1 + i);
            result2 = (byte)(result2 + new Byte((byte)(i + 1)));
        }
        return (byte)(result1 + result2);
    }
    static byte sump2(byte[] a, Byte result2) {
        Byte result1 = 1;
        for (Byte i : a) {
            result1 = (byte)(result1 + i);
            result2 = (byte)(result2 + i + 1);
        }
        return (byte)(result1 + result2);
    }
    static byte sumc2(byte[] a) {
        Byte result1 = 1;
        Byte result2 = ibc;
        for (Byte i : a) {
            result1 = (byte)(result1 + i);
            result2 = (byte)(result2 + i + ibc);
        }
        return (byte)(result1 + result2);
    }

    //===============================================
    // "remi_sum*" methods: a boxed value re-boxed on every iteration of a
    // counted loop (the box is loop-carried rather than loop-local).
    static byte remi_sum() {
        Byte j = new Byte((byte)1);
        for (int i = 0; i< 1000; i++) {
            j = new Byte((byte)(j + 1));
        }
        return j;
    }
    static byte remi_sumb() {
        Byte j = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            j = (byte)(j + 1);
        }
        return j;
    }
    static byte remi_sumf() {
        Byte j = foob((byte)1);
        for (int i = 0; i< 1000; i++) {
            j = (byte)(j + 1);
        }
        return j;
    }
    static byte remi_sump(Byte j) {
        for (int i = 0; i< 1000; i++) {
            j = new Byte((byte)(j + 1));
        }
        return j;
    }
    static byte remi_sumc() {
        Byte j = ibc;
        for (int i = 0; i< 1000; i++) {
            j = (byte)(j + ibc);
        }
        return j;
    }

    // Two loop-carried boxes per loop.
    static byte remi_sum2() {
        Byte j1 = new Byte((byte)1);
        Byte j2 = new Byte((byte)1);
        for (int i = 0; i< 1000; i++) {
            j1 = new Byte((byte)(j1 + 1));
            j2 = new Byte((byte)(j2 + 2));
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sumb2() {
        Byte j1 = Byte.valueOf((byte)1);
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            j1 = (byte)(j1 + 1);
            j2 = (byte)(j2 + 2);
        }
        return (byte)(j1 + j2);
    }
    static byte remi_summ2() {
        Byte j1 = new Byte((byte)1);
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            j1 = new Byte((byte)(j1 + 1));
            j2 = (byte)(j2 + 2);
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sump2(Byte j1) {
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            j1 = new Byte((byte)(j1 + 1));
            j2 = (byte)(j2 + 2);
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sumc2() {
        Byte j1 = ibc;
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            j1 = (byte)(j1 + ibc);
            j2 = (byte)(j2 + 2);
        }
        return (byte)(j1 + j2);
    }

    //===============================================
    // Safepoints and debug info for deoptimization:
    // these "_deop" variants route values through the non-inlined foo/foob
    // and call dummy() before returning, so the boxed values must be
    // reconstructible at the resulting safepoints.
    static byte simple_deop(byte i) {
        Byte ib = new Byte(foo(i));
        dummy();
        return ib;
    }
    static byte simpleb_deop(byte i) {
        Byte ib = Byte.valueOf(foo(i));
        dummy();
        return ib;
    }
    static byte simplef_deop(byte i) {
        Byte ib = foob(i);
        dummy();
        return ib;
    }
    static byte simplep_deop(Byte ib) {
        dummy();
        return ib;
    }
    static byte simplec_deop(byte i) {
        Byte ib = ibc;
        dummy();
        return ib;
    }
    static byte test_deop(byte i) {
        Byte ib = new Byte(foo(i));
        if ((i&1) == 0)
            ib = foo((byte)(i+1));
        dummy();
        return ib;
    }
    static byte testb_deop(byte i) {
        Byte ib = foo(i);
        if ((i&1) == 0)
            ib = foo((byte)(i+1));
        dummy();
        return ib;
    }
    static byte testf_deop(byte i) {
        Byte ib = foob(i);
        if ((i&1) == 0)
            ib = foo((byte)(i+1));
        dummy();
        return ib;
    }
    static byte testp_deop(byte i, Byte ib) {
        if ((i&1) == 0)
            ib = foo((byte)(i+1));
        dummy();
        return ib;
    }
    static byte testc_deop(byte i) {
        Byte ib = ibc;
        if ((i&1) == 0)
            ib = foo((byte)(i+1));
        dummy();
        return ib;
    }
    static byte sum_deop(byte[] a) {
        byte result = 1;
        for (Byte i : a)
            result += foo(i);
        dummy();
        return result;
    }
    static byte sumb_deop(byte[] a) {
        Byte result = 1;
        for (Byte i : a)
            result = (byte)(result + foo(i));
        dummy();
        return result;
    }
    static byte sumf_deop(byte[] a) {
        Byte result = 1;
        for (Byte i : a)
            result = (byte)(result + foob(i));
        dummy();
        return result;
    }
    static byte sump_deop(byte[] a, Byte result) {
        for (Byte i : a)
            result = (byte)(result + foob(i));
        dummy();
        return result;
    }
    static byte sumc_deop(byte[] a) {
        Byte result = ibc;
        for (Byte i : a)
            result = (byte)(result + foo(i));
        dummy();
        return result;
    }
    static byte remi_sum_deop() {
        Byte j = new Byte(foo((byte)1));
        for (int i = 0; i< 1000; i++) {
            j = new Byte(foo((byte)(j + 1)));
        }
        dummy();
        return j;
    }
    static byte remi_sumb_deop() {
        Byte j = Byte.valueOf(foo((byte)1));
        for (int i = 0; i< 1000; i++) {
            j = foo((byte)(j + 1));
        }
        dummy();
        return j;
    }
    static byte remi_sumf_deop() {
        Byte j = foob((byte)1);
        for (int i = 0; i< 1000; i++) {
            j = foo((byte)(j + 1));
        }
        dummy();
        return j;
    }
    static byte remi_sump_deop(Byte j) {
        for (int i = 0; i< 1000; i++) {
            j = foo((byte)(j + 1));
        }
        dummy();
        return j;
    }
    static byte remi_sumc_deop() {
        Byte j = ibc;
        for (int i = 0; i< 1000; i++) {
            j = foo((byte)(j + 1));
        }
        dummy();
        return j;
    }

    //===============================================
    // Conditional increment: the loop-carried box is only updated on
    // even iterations, so a phi of old/new boxes crosses the back edge.
    static byte remi_sum_cond() {
        Byte j = new Byte((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j = new Byte((byte)(j + 1));
            }
        }
        return j;
    }
    static byte remi_sumb_cond() {
        Byte j = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j = (byte)(j + 1);
            }
        }
        return j;
    }
    static byte remi_sumf_cond() {
        Byte j = foob((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j = (byte)(j + 1);
            }
        }
        return j;
    }
    static byte remi_sump_cond(Byte j) {
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j = (byte)(j + 1);
            }
        }
        return j;
    }
    static byte remi_sumc_cond() {
        Byte j = ibc;
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j = (byte)(j + ibc);
            }
        }
        return j;
    }
    static byte remi_sum2_cond() {
        Byte j1 = new Byte((byte)1);
        Byte j2 = new Byte((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j1 = new Byte((byte)(j1 + 1));
            } else {
                j2 = new Byte((byte)(j2 + 2));
            }
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sumb2_cond() {
        Byte j1 = Byte.valueOf((byte)1);
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j1 = (byte)(j1 + 1);
            } else {
                j2 = (byte)(j2 + 2);
            }
        }
        return (byte)(j1 + j2);
    }
    static byte remi_summ2_cond() {
        Byte j1 = new Byte((byte)1);
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j1 = new Byte((byte)(j1 + 1));
            } else {
                j2 = (byte)(j2 + 2);
            }
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sump2_cond(Byte j1) {
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j1 = new Byte((byte)(j1 + 1));
            } else {
                j2 = (byte)(j2 + 2);
            }
        }
        return (byte)(j1 + j2);
    }
    static byte remi_sumc2_cond() {
        Byte j1 = ibc;
        Byte j2 = Byte.valueOf((byte)1);
        for (int i = 0; i< 1000; i++) {
            if ((i&1) == 0) {
                j1 = (byte)(j1 + ibc);
            } else {
                j2 = (byte)(j2 + 2);
            }
        }
        return (byte)(j1 + j2);
    }

    /**
     * Runs every variant enough times to trigger JIT compilation
     * (12000 iterations for the scalar tests, 100 for the loop tests)
     * and compares the accumulated results against precomputed values.
     * Byte arithmetic wraps mod 256, which is why several expected
     * values below are negative. Throws InternalError on any mismatch.
     */
    public static void main(String[] args) {
        final int ntests = 70;
        // Names indexed in lock-step with val[] and res[] below.
        String[] test_name = new String[] {
            "simple", "simpleb", "simplec", "simplef", "simplep",
            "simple2", "simpleb2", "simplec2", "simplem2", "simplep2",
            "simple_deop", "simpleb_deop", "simplec_deop", "simplef_deop", "simplep_deop",
            "test", "testb", "testc", "testm", "testp",
            "test2", "testb2", "testc2", "testm2", "testp2",
            "test_deop", "testb_deop", "testc_deop", "testf_deop", "testp_deop",
            "sum", "sumb", "sumc", "sumf", "sump",
            "sum2", "sumb2", "sumc2", "summ2", "sump2",
            "sum_deop", "sumb_deop", "sumc_deop", "sumf_deop", "sump_deop",
            "remi_sum", "remi_sumb", "remi_sumc", "remi_sumf", "remi_sump",
            "remi_sum2", "remi_sumb2", "remi_sumc2", "remi_summ2", "remi_sump2",
            "remi_sum_deop", "remi_sumb_deop", "remi_sumc_deop", "remi_sumf_deop", "remi_sump_deop",
            "remi_sum_cond", "remi_sumb_cond", "remi_sumc_cond", "remi_sumf_cond", "remi_sump_cond",
            "remi_sum2_cond", "remi_sumb2_cond", "remi_sumc2_cond", "remi_summ2_cond", "remi_sump2_cond"
        };
        // Expected results; must match the loops below exactly.
        final int[] val = new int[] {
            -5488, -5488, 12000, -5488, -5488,
            1024, 1024, -5552, 1024, 1024,
            -5488, -5488, 12000, -5488, -5488,
            512, 512, 6256, 512, 512,
            13024, 13024, -5584, 13024, 13024,
            512, 512, 6256, 512, 512,
            45, 45, 45, 45, 45,
            66, 66, 66, 66, 66,
            45, 45, 45, 45, 45,
            -23, -23, -23, -23, -23,
            -70, -70, -70, -70, -70,
            -23, -23, -23, -23, -23,
            -11, -11, -11, -11, -11,
            -34, -34, -34, -34, -34
        };
        int[] res = new int[ntests];
        for (int i = 0; i < ntests; i++) {
            res[i] = 0;
        }

        // Scalar tests: accumulate over 12000 calls (also warms up the JIT).
        for (int i = 0; i < 12000; i++) {
            res[0] += simple((byte)i);
            res[1] += simpleb((byte)i);
            res[2] += simplec();
            res[3] += simplef((byte)i);
            res[4] += simplep((byte)i);

            res[5] += simple2((byte)i);
            res[6] += simpleb2((byte)i);
            res[7] += simplec2((byte)i);
            res[8] += simplem2((byte)i);
            res[9] += simplep2((byte)i, (byte)i);

            res[10] += simple_deop((byte)i);
            res[11] += simpleb_deop((byte)i);
            res[12] += simplec_deop((byte)i);
            res[13] += simplef_deop((byte)i);
            res[14] += simplep_deop((byte)i);

            res[15] += test((byte)i);
            res[16] += testb((byte)i);
            res[17] += testc((byte)i);
            res[18] += testm((byte)i);
            res[19] += testp((byte)i, (byte)i);

            res[20] += test2((byte)i);
            res[21] += testb2((byte)i);
            res[22] += testc2((byte)i);
            res[23] += testm2((byte)i);
            res[24] += testp2((byte)i, (byte)i);

            res[25] += test_deop((byte)i);
            res[26] += testb_deop((byte)i);
            res[27] += testc_deop((byte)i);
            res[28] += testf_deop((byte)i);
            res[29] += testp_deop((byte)i, (byte)i);
        }

        // Loop tests: results are idempotent, so just overwrite each round.
        byte[] ia = new byte[1000];
        for (int i = 0; i < 1000; i++) {
            ia[i] = (byte)i;
        }
        for (int i = 0; i < 100; i++) {
            res[30] = sum(ia);
            res[31] = sumb(ia);
            res[32] = sumc(ia);
            res[33] = sumf(ia);
            res[34] = sump(ia, (byte)1);

            res[35] = sum2(ia);
            res[36] = sumb2(ia);
            res[37] = sumc2(ia);
            res[38] = summ2(ia);
            res[39] = sump2(ia, (byte)1);

            res[40] = sum_deop(ia);
            res[41] = sumb_deop(ia);
            res[42] = sumc_deop(ia);
            res[43] = sumf_deop(ia);
            res[44] = sump_deop(ia, (byte)1);

            res[45] = remi_sum();
            res[46] = remi_sumb();
            res[47] = remi_sumc();
            res[48] = remi_sumf();
            res[49] = remi_sump((byte)1);

            res[50] = remi_sum2();
            res[51] = remi_sumb2();
            res[52] = remi_sumc2();
            res[53] = remi_summ2();
            res[54] = remi_sump2((byte)1);

            res[55] = remi_sum_deop();
            res[56] = remi_sumb_deop();
            res[57] = remi_sumc_deop();
            res[58] = remi_sumf_deop();
            res[59] = remi_sump_deop((byte)1);

            res[60] = remi_sum_cond();
            res[61] = remi_sumb_cond();
            res[62] = remi_sumc_cond();
            res[63] = remi_sumf_cond();
            res[64] = remi_sump_cond((byte)1);

            res[65] = remi_sum2_cond();
            res[66] = remi_sumb2_cond();
            res[67] = remi_sumc2_cond();
            res[68] = remi_summ2_cond();
            res[69] = remi_sump2_cond((byte)1);
        }

        // Compare all accumulated results against the expected values.
        int failed = 0;
        for (int i = 0; i < ntests; i++) {
            if (res[i] != val[i]) {
                System.err.println(test_name[i] + ": " + res[i] + " != " + val[i]);
                failed++;
            }
        }
        if (failed > 0) {
            System.err.println("Failed " + failed + " tests.");
            throw new InternalError();
        } else {
            System.out.println("Passed.");
        }
    }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 6934604
* @summary enable parts of EliminateAutoBox by default
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox TestDoubleBoxing
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
* -XX:CompileCommand=exclude,TestDoubleBoxing.dummy -XX:CompileCommand=exclude,TestDoubleBoxing.foo -XX:CompileCommand=exclude,TestDoubleBoxing.foob TestDoubleBoxing
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-EliminateAutoBox
* -XX:CompileCommand=exclude,TestDoubleBoxing.dummy -XX:CompileCommand=exclude,TestDoubleBoxing.foo -XX:CompileCommand=exclude,TestDoubleBoxing.foob TestDoubleBoxing
*
*/
public class TestDoubleBoxing {
static final Double ibc = new Double(1.);
//===============================================
// Non-inlined methods to test deoptimization info
static void dummy() { }
static double foo(double i) { return i; }
static Double foob(double i) { return Double.valueOf(i); }
static double simple(double i) {
Double ib = new Double(i);
return ib;
}
static double simpleb(double i) {
Double ib = Double.valueOf(i);
return ib;
}
static double simplec() {
Double ib = ibc;
return ib;
}
static double simplef(double i) {
Double ib = foob(i);
return ib;
}
static double simplep(Double ib) {
return ib;
}
static double simple2(double i) {
Double ib1 = new Double(i);
Double ib2 = new Double(i+1.);
return ib1 + ib2;
}
static double simpleb2(double i) {
Double ib1 = Double.valueOf(i);
Double ib2 = Double.valueOf(i+1.);
return ib1 + ib2;
}
static double simplem2(double i) {
Double ib1 = new Double(i);
Double ib2 = Double.valueOf(i+1.);
return ib1 + ib2;
}
static double simplep2(double i, Double ib1) {
Double ib2 = Double.valueOf(i+1.);
return ib1 + ib2;
}
static double simplec2(double i) {
Double ib1 = ibc;
Double ib2 = Double.valueOf(i+1.);
return ib1 + ib2;
}
//===============================================
static double test(double f, int i) {
Double ib = new Double(f);
if ((i&1) == 0)
ib = f+1.;
return ib;
}
static double testb(double f, int i) {
Double ib = f;
if ((i&1) == 0)
ib = (f+1.);
return ib;
}
static double testm(double f, int i) {
Double ib = f;
if ((i&1) == 0)
ib = new Double(f+1.);
return ib;
}
static double testp(double f, int i, Double ib) {
if ((i&1) == 0)
ib = new Double(f+1.);
return ib;
}
static double testc(double f, int i) {
Double ib = ibc;
if ((i&1) == 0)
ib = new Double(f+1.);
return ib;
}
static double test2(double f, int i) {
Double ib1 = new Double(f);
Double ib2 = new Double(f+1.);
if ((i&1) == 0) {
ib1 = new Double(f+1.);
ib2 = new Double(f+2.);
}
return ib1+ib2;
}
static double testb2(double f, int i) {
Double ib1 = f;
Double ib2 = f+1.;
if ((i&1) == 0) {
ib1 = (f+1.);
ib2 = (f+2.);
}
return ib1+ib2;
}
static double testm2(double f, int i) {
Double ib1 = new Double(f);
Double ib2 = f+1.;
if ((i&1) == 0) {
ib1 = new Double(f+1.);
ib2 = (f+2.);
}
return ib1+ib2;
}
static double testp2(double f, int i, Double ib1) {
Double ib2 = f+1.;
if ((i&1) == 0) {
ib1 = new Double(f+1.);
ib2 = (f+2.);
}
return ib1+ib2;
}
static double testc2(double f, int i) {
Double ib1 = ibc;
Double ib2 = f+1.;
if ((i&1) == 0) {
ib1 = (ibc+1.);
ib2 = (f+2.);
}
return ib1+ib2;
}
//===============================================
static double sum(double[] a) {
double result = 1.;
for (Double i : a)
result += i;
return result;
}
static double sumb(double[] a) {
Double result = 1.;
for (Double i : a)
result += i;
return result;
}
static double sumc(double[] a) {
Double result = ibc;
for (Double i : a)
result += i;
return result;
}
static double sumf(double[] a) {
Double result = foob(1.);
for (Double i : a)
result += i;
return result;
}
static double sump(double[] a, Double result) {
for (Double i : a)
result += i;
return result;
}
static double sum2(double[] a) {
double result1 = 1.;
double result2 = 1.;
for (Double i : a) {
result1 += i;
result2 += i + 1.;
}
return result1 + result2;
}
static double sumb2(double[] a) {
Double result1 = 1.;
Double result2 = 1.;
for (Double i : a) {
result1 += i;
result2 += i + 1.;
}
return result1 + result2;
}
static double summ2(double[] a) {
Double result1 = 1.;
Double result2 = new Double(1.);
for (Double i : a) {
result1 += i;
result2 += new Double(i + 1.);
}
return result1 + result2;
}
static double sump2(double[] a, Double result2) {
Double result1 = 1.;
for (Double i : a) {
result1 += i;
result2 += i + 1.;
}
return result1 + result2;
}
static double sumc2(double[] a) {
Double result1 = 1.;
Double result2 = ibc;
for (Double i : a) {
result1 += i;
result2 += i + ibc;
}
return result1 + result2;
}
//===============================================
static double remi_sum() {
Double j = new Double(1.);
for (int i = 0; i< 1000; i++) {
j = new Double(j + 1.);
}
return j;
}
static double remi_sumb() {
Double j = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
j = j + 1.;
}
return j;
}
static double remi_sumf() {
Double j = foob(1.);
for (int i = 0; i< 1000; i++) {
j = j + 1.;
}
return j;
}
static double remi_sump(Double j) {
for (int i = 0; i< 1000; i++) {
j = new Double(j + 1.);
}
return j;
}
static double remi_sumc() {
Double j = ibc;
for (int i = 0; i< 1000; i++) {
j = j + ibc;
}
return j;
}
static double remi_sum2() {
Double j1 = new Double(1.);
Double j2 = new Double(1.);
for (int i = 0; i< 1000; i++) {
j1 = new Double(j1 + 1.);
j2 = new Double(j2 + 2.);
}
return j1 + j2;
}
static double remi_sumb2() {
Double j1 = Double.valueOf(1.);
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
j1 = j1 + 1.;
j2 = j2 + 2.;
}
return j1 + j2;
}
static double remi_summ2() {
Double j1 = new Double(1.);
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
j1 = new Double(j1 + 1.);
j2 = j2 + 2.;
}
return j1 + j2;
}
static double remi_sump2(Double j1) {
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
j1 = new Double(j1 + 1.);
j2 = j2 + 2.;
}
return j1 + j2;
}
static double remi_sumc2() {
Double j1 = ibc;
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
j1 = j1 + ibc;
j2 = j2 + 2.;
}
return j1 + j2;
}
//===============================================
// Safepoints and debug info for deoptimization
static double simple_deop(double i) {
Double ib = new Double(foo(i));
dummy();
return ib;
}
static double simpleb_deop(double i) {
Double ib = Double.valueOf(foo(i));
dummy();
return ib;
}
static double simplef_deop(double i) {
Double ib = foob(i);
dummy();
return ib;
}
static double simplep_deop(Double ib) {
dummy();
return ib;
}
static double simplec_deop(double i) {
Double ib = ibc;
dummy();
return ib;
}
static double test_deop(double f, int i) {
Double ib = new Double(foo(f));
if ((i&1) == 0)
ib = foo(f+1.);
dummy();
return ib;
}
static double testb_deop(double f, int i) {
Double ib = foo(f);
if ((i&1) == 0)
ib = foo(f+1.);
dummy();
return ib;
}
static double testf_deop(double f, int i) {
Double ib = foob(f);
if ((i&1) == 0)
ib = foo(f+1.);
dummy();
return ib;
}
static double testp_deop(double f, int i, Double ib) {
if ((i&1) == 0)
ib = foo(f+1.);
dummy();
return ib;
}
static double testc_deop(double f, int i) {
Double ib = ibc;
if ((i&1) == 0)
ib = foo(f+1.);
dummy();
return ib;
}
static double sum_deop(double[] a) {
double result = 1.;
for (Double i : a)
result += foo(i);
dummy();
return result;
}
static double sumb_deop(double[] a) {
Double result = 1.;
for (Double i : a)
result += foo(i);
dummy();
return result;
}
static double sumf_deop(double[] a) {
Double result = 1.;
for (Double i : a)
result += foob(i);
dummy();
return result;
}
static double sump_deop(double[] a, Double result) {
for (Double i : a)
result += foob(i);
dummy();
return result;
}
static double sumc_deop(double[] a) {
Double result = ibc;
for (Double i : a)
result += foo(i);
dummy();
return result;
}
static double remi_sum_deop() {
Double j = new Double(foo(1.));
for (int i = 0; i< 1000; i++) {
j = new Double(foo(j + 1.));
}
dummy();
return j;
}
static double remi_sumb_deop() {
Double j = Double.valueOf(foo(1.));
for (int i = 0; i< 1000; i++) {
j = foo(j + 1.);
}
dummy();
return j;
}
static double remi_sumf_deop() {
Double j = foob(1.);
for (int i = 0; i< 1000; i++) {
j = foo(j + 1.);
}
dummy();
return j;
}
static double remi_sump_deop(Double j) {
for (int i = 0; i< 1000; i++) {
j = foo(j + 1.);
}
dummy();
return j;
}
static double remi_sumc_deop() {
Double j = ibc;
for (int i = 0; i< 1000; i++) {
j = foo(j + 1.);
}
dummy();
return j;
}
//===============================================
// Conditional increment
static double remi_sum_cond() {
Double j = new Double(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j = new Double(j + 1.);
}
}
return j;
}
static double remi_sumb_cond() {
Double j = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j = j + 1.;
}
}
return j;
}
static double remi_sumf_cond() {
Double j = foob(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j = j + 1.;
}
}
return j;
}
static double remi_sump_cond(Double j) {
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j = j + 1.;
}
}
return j;
}
static double remi_sumc_cond() {
Double j = ibc;
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j = j + ibc;
}
}
return j;
}
static double remi_sum2_cond() {
Double j1 = new Double(1.);
Double j2 = new Double(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j1 = new Double(j1 + 1.);
} else {
j2 = new Double(j2 + 2.);
}
}
return j1 + j2;
}
static double remi_sumb2_cond() {
Double j1 = Double.valueOf(1.);
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j1 = j1 + 1.;
} else {
j2 = j2 + 2.;
}
}
return j1 + j2;
}
static double remi_summ2_cond() {
Double j1 = new Double(1.);
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j1 = new Double(j1 + 1.);
} else {
j2 = j2 + 2.;
}
}
return j1 + j2;
}
static double remi_sump2_cond(Double j1) {
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j1 = new Double(j1 + 1.);
} else {
j2 = j2 + 2.;
}
}
return j1 + j2;
}
static double remi_sumc2_cond() {
Double j1 = ibc;
Double j2 = Double.valueOf(1.);
for (int i = 0; i< 1000; i++) {
if ((i&1) == 0) {
j1 = j1 + ibc;
} else {
j2 = j2 + 2;
}
}
return j1 + j2;
}
  /**
   * Driver: runs every kernel enough times for the JIT to compile them,
   * accumulates the results in res[], and verifies each against the expected
   * value in val[].  Throws InternalError if any kernel produced a wrong
   * result.
   */
  public static void main(String[] args) {
    final int ntests = 70;
    // Kernel names, index-aligned with val[] and res[] below.
    String[] test_name = new String[] {
      "simple", "simpleb", "simplec", "simplef", "simplep",
      "simple2", "simpleb2", "simplec2", "simplem2", "simplep2",
      "simple_deop", "simpleb_deop", "simplec_deop", "simplef_deop", "simplep_deop",
      "test", "testb", "testc", "testm", "testp",
      "test2", "testb2", "testc2", "testm2", "testp2",
      "test_deop", "testb_deop", "testc_deop", "testf_deop", "testp_deop",
      "sum", "sumb", "sumc", "sumf", "sump",
      "sum2", "sumb2", "sumc2", "summ2", "sump2",
      "sum_deop", "sumb_deop", "sumc_deop", "sumf_deop", "sump_deop",
      "remi_sum", "remi_sumb", "remi_sumc", "remi_sumf", "remi_sump",
      "remi_sum2", "remi_sumb2", "remi_sumc2", "remi_summ2", "remi_sump2",
      "remi_sum_deop", "remi_sumb_deop", "remi_sumc_deop", "remi_sumf_deop", "remi_sump_deop",
      "remi_sum_cond", "remi_sumb_cond", "remi_sumc_cond", "remi_sumf_cond", "remi_sump_cond",
      "remi_sum2_cond", "remi_sumb2_cond", "remi_sumc2_cond", "remi_summ2_cond", "remi_sump2_cond"
    };
    // Expected result for each kernel above, in the same order.
    final double[] val = new double[] {
      71994000., 71994000., 12000., 71994000., 71994000.,
      144000000., 144000000., 72018000., 144000000., 144000000.,
      71994000., 71994000., 12000., 71994000., 71994000.,
      72000000., 72000000., 36006000., 72000000., 72000000.,
      144012000., 144012000., 72030000., 144012000., 144012000.,
      72000000., 72000000., 36006000., 72000000., 72000000.,
      499501., 499501., 499501., 499501., 499501.,
      1000002., 1000002., 1000002., 1000002., 1000002.,
      499501., 499501., 499501., 499501., 499501.,
      1001., 1001., 1001., 1001., 1001.,
      3002., 3002., 3002., 3002., 3002.,
      1001., 1001., 1001., 1001., 1001.,
      501., 501., 501., 501., 501.,
      1502., 1502., 1502., 1502., 1502.
    };
    double[] res = new double[ntests];
    for (int i = 0; i < ntests; i++) {
      res[i] = 0.;
    }
    // 12000 iterations of the simple*/test* kernels (res[0..29]); results are
    // ACCUMULATED across iterations, so expected values are large sums.
    for (int i = 0; i < 12000; i++) {
      res[0] += simple(i);
      res[1] += simpleb(i);
      res[2] += simplec();
      res[3] += simplef(i);
      res[4] += simplep((double)i);

      res[5] += simple2((double)i);
      res[6] += simpleb2((double)i);
      res[7] += simplec2((double)i);
      res[8] += simplem2((double)i);
      res[9] += simplep2((double)i, (double)i);

      res[10] += simple_deop((double)i);
      res[11] += simpleb_deop((double)i);
      res[12] += simplec_deop((double)i);
      res[13] += simplef_deop((double)i);
      res[14] += simplep_deop((double)i);

      res[15] += test((double)i, i);
      res[16] += testb((double)i, i);
      res[17] += testc((double)i, i);
      res[18] += testm((double)i, i);
      res[19] += testp((double)i, i, (double)i);

      res[20] += test2((double)i, i);
      res[21] += testb2((double)i, i);
      res[22] += testc2((double)i, i);
      res[23] += testm2((double)i, i);
      res[24] += testp2((double)i, i, (double)i);

      res[25] += test_deop((double)i, i);
      res[26] += testb_deop((double)i, i);
      res[27] += testc_deop((double)i, i);
      res[28] += testf_deop((double)i, i);
      res[29] += testp_deop((double)i, i, (double)i);
    }
    // 1000-element input array for the sum* kernels.
    double[] ia = new double[1000];
    for (int i = 0; i < 1000; i++) {
      ia[i] = i;
    }
    // 100 passes over the remaining kernels (res[30..69]); note these slots
    // are OVERWRITTEN each pass, not accumulated.
    for (int i = 0; i < 100; i++) {
      res[30] = sum(ia);
      res[31] = sumb(ia);
      res[32] = sumc(ia);
      res[33] = sumf(ia);
      res[34] = sump(ia, 1.);

      res[35] = sum2(ia);
      res[36] = sumb2(ia);
      res[37] = sumc2(ia);
      res[38] = summ2(ia);
      res[39] = sump2(ia, 1.);

      res[40] = sum_deop(ia);
      res[41] = sumb_deop(ia);
      res[42] = sumc_deop(ia);
      res[43] = sumf_deop(ia);
      res[44] = sump_deop(ia, 1.);

      res[45] = remi_sum();
      res[46] = remi_sumb();
      res[47] = remi_sumc();
      res[48] = remi_sumf();
      res[49] = remi_sump(1.);

      res[50] = remi_sum2();
      res[51] = remi_sumb2();
      res[52] = remi_sumc2();
      res[53] = remi_summ2();
      res[54] = remi_sump2(1.);

      res[55] = remi_sum_deop();
      res[56] = remi_sumb_deop();
      res[57] = remi_sumc_deop();
      res[58] = remi_sumf_deop();
      res[59] = remi_sump_deop(1.);

      res[60] = remi_sum_cond();
      res[61] = remi_sumb_cond();
      res[62] = remi_sumc_cond();
      res[63] = remi_sumf_cond();
      res[64] = remi_sump_cond(1.);

      res[65] = remi_sum2_cond();
      res[66] = remi_sumb2_cond();
      res[67] = remi_sumc2_cond();
      res[68] = remi_summ2_cond();
      res[69] = remi_sump2_cond(1.);
    }
    // Compare every accumulated result against its expected value.
    int failed = 0;
    for (int i = 0; i < ntests; i++) {
      if (res[i] != val[i]) {
        System.err.println(test_name[i] + ": " + res[i] + " != " + val[i]);
        failed++;
      }
    }
    if (failed > 0) {
      System.err.println("Failed " + failed + " tests.");
      throw new InternalError();
    } else {
      System.out.println("Passed.");
    }
  }
}
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册