Commit b2d14d0d authored by roland

8005031: Some cleanup in c2 to prepare for incremental inlining support

Summary: collection of small changes to prepare for incremental inlining.
Reviewed-by: twisti, kvn
Parent 8862f1b8
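Most of the pieces in this change exist so that PrintInlining output can later come out in a sensible order even when inlining happens out of parse order (late/incremental inlining): each inlining message is written into a per-site buffer owned by Compile (PrintInliningBuffer in the compile.hpp hunk below) and the buffers are flushed at the end of the compile by Compile::dump_inlining(). The sketch below is a simplified, standalone illustration of that buffering idea only; it uses std::string and std::vector instead of HotSpot's stringStream/GrowableArray, it appends to the reserved slot instead of inserting fresh buffers the way Compile::print_inlining_insert() does, and all names in it are illustrative, not HotSpot APIs.

#include <iostream>
#include <string>
#include <vector>

// Standalone sketch (not HotSpot code): buffer inlining messages per call site
// so they can be flushed in parse order even when some inlining is deferred.
class InliningLog {
  std::vector<std::string> buffers_;
  size_t current_;
public:
  InliningLog() : buffers_(1), current_(0) {}

  // roughly: Compile::print_inlining_stream()->print(...)
  void print(const std::string& msg) { buffers_[current_] += msg; }

  // roughly: print_inlining_skip() -- keep the current slot for a deferred
  // site and continue subsequent messages in a fresh slot after it
  size_t skip() {
    size_t reserved = current_;
    buffers_.insert(buffers_.begin() + ++current_, std::string());
    return reserved;
  }

  // roughly: print_inlining_insert() -- route output back to the reserved slot
  void resume(size_t slot) { current_ = slot; }

  // roughly: Compile::dump_inlining() -- emit everything in the original order
  void dump() const {
    for (size_t i = 0; i < buffers_.size(); i++) std::cout << buffers_[i];
  }
};

int main() {
  InliningLog log;
  log.print("@ 1  foo()  inline\n");
  log.print("@ 4  bar()  late inline (deferred)\n");
  size_t bar_slot = log.skip();      // bar() will actually be parsed later
  log.print("@ 9  baz()  inline\n");

  log.resume(bar_slot);              // late inlining of bar() happens now
  log.print("@ 4    bar::helper()  inline\n");

  log.dump();                        // output stays grouped by call site, in parse order
  return 0;
}
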
@@ -366,10 +366,12 @@ bool ciField::will_link(ciInstanceKlass* accessing_klass,
 // ------------------------------------------------------------------
 // ciField::print
 void ciField::print() {
-  tty->print("<ciField ");
+  tty->print("<ciField name=");
   _holder->print_name();
   tty->print(".");
   _name->print_symbol();
+  tty->print(" signature=");
+  _signature->print_symbol();
   tty->print(" offset=%d type=", _offset);
   if (_type != NULL) _type->print_name();
   else               tty->print("(reference)");

@@ -538,6 +538,7 @@ void CompilerOracle::parse_from_line(char* line) {
   if (match != NULL) {
     if (!_quiet) {
+      ResourceMark rm;
       tty->print("CompilerOracle: %s ", command_names[command]);
       match->print();
     }

@@ -189,6 +189,11 @@ Node *AddNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       set_req(1, addx);
       set_req(2, a22);
       progress = this;
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      if (add2->outcnt() == 0 && igvn) {
+        // add disconnected.
+        igvn->_worklist.push(add2);
+      }
     }
   }

@@ -624,6 +629,11 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     if( t22->singleton() && (t22 != Type::TOP) ) {  // Right input is an add of a constant?
       set_req(Address, phase->transform(new (phase->C) AddPNode(in(Base),in(Address),add->in(1))));
       set_req(Offset, add->in(2));
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      if (add->outcnt() == 0 && igvn) {
+        // add disconnected.
+        igvn->_worklist.push((Node*)add);
+      }
       return this;              // Made progress
     }
   }

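The two addnode.cpp hunks above, and the cfgnode.cpp PhiNode hunk further down, apply the same idiom: when an Ideal() rewrite leaves an intermediate node with no remaining uses, the node is pushed onto the iterative-GVN worklist so IGVN revisits it and removes it instead of leaving it disconnected in the graph. The snippet below is a simplified, standalone analogue of that bookkeeping; the Node and Worklist types here are illustrative stand-ins, not HotSpot classes.

#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-ins for C2's Node and the IGVN worklist (not HotSpot code).
struct Node {
  std::string name;
  int outcnt;                 // number of remaining uses, like Node::outcnt()
};

struct Worklist {
  std::vector<Node*> nodes;
  void push(Node* n) { nodes.push_back(n); }
};

// After a transformation drops its last use of 'n', enqueue it so a later
// cleanup pass can reclaim it -- mirroring "if (add->outcnt() == 0 && igvn)
// igvn->_worklist.push(add);" in the hunks above.
void note_possibly_dead(Node* n, Worklist* igvn_worklist) {
  if (n->outcnt == 0 && igvn_worklist != NULL) {
    igvn_worklist->push(n);   // disconnected: let the iterative pass remove it
  }
}

int main() {
  Node add2 = { "add2", 0 };  // pretend a rewrite just disconnected add2
  Worklist worklist;
  note_possibly_dead(&add2, &worklist);
  std::cout << "worklist size: " << worklist.nodes.size() << "\n";
  return 0;
}
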
@@ -403,7 +403,7 @@ const char* InlineTree::check_can_parse(ciMethod* callee) {
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
+  C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
   if (Verbose && callee_method) {
     const InlineTree *top = this;

@@ -274,6 +274,9 @@ class LateInlineCallGenerator : public DirectCallGenerator {
   virtual void do_late_inline();

   virtual JVMState* generate(JVMState* jvms) {
+    Compile *C = Compile::current();
+    C->print_inlining_skip(this);
+
     // Record that this call site should be revisited once the main
     // parse is finished.
     Compile::current()->add_late_inline(this);
@@ -284,7 +287,6 @@ class LateInlineCallGenerator : public DirectCallGenerator {
     // as is done for allocations and macro expansion.
     return DirectCallGenerator::generate(jvms);
   }
-
 };

@@ -307,7 +309,9 @@ void LateInlineCallGenerator::do_late_inline() {
   // Make sure the state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
+    C->initial_gvn()->set_type_bottom(mem);
+    map->set_req(TypeFunc::Memory, mem);
   }

   // Make enough space for the expression stack and transfer the incoming arguments
@@ -320,6 +324,8 @@ void LateInlineCallGenerator::do_late_inline() {
     }
   }

+  C->print_inlining_insert(this);
+
   CompileLog* log = C->log();
   if (log != NULL) {
     log->head("late_inline method='%d'", log->identify(method()));

@@ -608,7 +614,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
         if (cg != NULL && cg->is_inline())
           return cg;
       } else {
-        if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
+        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
       }
     }
     break;

@@ -147,9 +147,9 @@ class CallGenerator : public ResourceObj {
                                                 CallGenerator* cg);
   virtual Node* generate_predicate(JVMState* jvms) { return NULL; };

-  static void print_inlining(ciMethod* callee, int inline_level, int bci, const char* msg) {
+  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
     if (PrintInlining)
-      CompileTask::print_inlining(callee, inline_level, bci, msg);
+      C->print_inlining(callee, inline_level, bci, msg);
   }
 };

@@ -751,7 +751,7 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
       projs->fallthrough_ioproj = pn;
       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
         Node* e = pn->out(j);
-        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
+        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
           assert(projs->exobj == NULL, "only one");
           projs->exobj = e;
         }

@@ -1566,6 +1566,10 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     Node* n = in(j);            // Get the input
     if (rc == NULL || phase->type(rc) == Type::TOP) {
       if (n != top) {           // Not already top?
+        PhaseIterGVN *igvn = phase->is_IterGVN();
+        if (can_reshape && igvn != NULL) {
+          igvn->_worklist.push(r);
+        }
         set_req(j, top);        // Nuke it down
         progress = this;        // Record progress
       }

@@ -610,7 +610,9 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                   _printer(IdealGraphPrinter::printer()),
 #endif
-                  _congraph(NULL) {
+                  _congraph(NULL),
+                  _print_inlining_list(NULL),
+                  _print_inlining(0) {
   C = this;
   CompileWrapper cw(this);

@@ -666,6 +668,9 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);

+  if (PrintInlining) {
+    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
+  }
   { // Scope for timing the parser
     TracePhase t3("parse", &_t_parser, true);

@@ -754,6 +759,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
       }
     }
     assert(_late_inlines.length() == 0, "should have been processed");
+    dump_inlining();

     print_method("Before RemoveUseless", 3);

@@ -899,7 +905,9 @@ Compile::Compile( ciEnv* ci_env,
 #endif
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
-    _congraph(NULL) {
+    _congraph(NULL),
+    _print_inlining_list(NULL),
+    _print_inlining(0) {
   C = this;

 #ifndef PRODUCT

@@ -3351,3 +3359,11 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
     cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
   }
 }
+
+void Compile::dump_inlining() {
+  if (PrintInlining) {
+    for (int i = 0; i < _print_inlining_list->length(); i++) {
+      tty->print(_print_inlining_list->at(i).ss()->as_string());
+    }
+  }
+}

@@ -30,6 +30,7 @@
 #include "code/debugInfoRec.hpp"
 #include "code/exceptionHandlerTable.hpp"
 #include "compiler/compilerOracle.hpp"
+#include "compiler/compileBroker.hpp"
 #include "libadt/dict.hpp"
 #include "libadt/port.hpp"
 #include "libadt/vectset.hpp"

@@ -369,6 +370,61 @@ class Compile : public Phase {
   GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
                                                 // main parsing has finished.
+
+  // Inlining may not happen in parse order which would make
+  // PrintInlining output confusing. Keep track of PrintInlining
+  // pieces in order.
+  class PrintInliningBuffer : public ResourceObj {
+   private:
+    CallGenerator* _cg;
+    stringStream* _ss;
+
+   public:
+    PrintInliningBuffer()
+      : _cg(NULL) { _ss = new stringStream(); }
+
+    stringStream* ss() const { return _ss; }
+    CallGenerator* cg() const { return _cg; }
+    void set_cg(CallGenerator* cg) { _cg = cg; }
+  };
+
+  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
+  int _print_inlining;
+
+ public:
+
+  outputStream* print_inlining_stream() const {
+    return _print_inlining_list->at(_print_inlining).ss();
+  }
+
+  void print_inlining_skip(CallGenerator* cg) {
+    if (PrintInlining) {
+      _print_inlining_list->at(_print_inlining).set_cg(cg);
+      _print_inlining++;
+      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    }
+  }
+
+  void print_inlining_insert(CallGenerator* cg) {
+    if (PrintInlining) {
+      for (int i = 0; i < _print_inlining_list->length(); i++) {
+        if (_print_inlining_list->at(i).cg() == cg) {
+          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
+          _print_inlining = i+1;
+          _print_inlining_list->at(i).set_cg(NULL);
+          return;
+        }
+      }
+      ShouldNotReachHere();
+    }
+  }
+
+  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
+    stringStream ss;
+    CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
+    print_inlining_stream()->print(ss.as_string());
+  }
+
+ private:

   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result

@@ -591,7 +647,7 @@ class Compile : public Phase {
   void         reset_dead_node_list() { _dead_node_list.Reset();
                                         _dead_node_count = 0;
                                       }
-  uint         live_nodes() {
+  uint         live_nodes() const {
     int  val = _unique - _dead_node_count;
     assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _unique, _dead_node_count));
     return (uint) val;

@@ -702,7 +758,7 @@ class Compile : public Phase {
   void          identify_useful_nodes(Unique_Node_List &useful);
   void          update_dead_node_list(Unique_Node_List &useful);
   void          remove_useless_nodes (Unique_Node_List &useful);

   WarmCallInfo* warm_calls() const          { return _warm_calls; }
   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }

@@ -711,6 +767,8 @@ class Compile : public Phase {
   // Record this CallGenerator for inlining at the end of parsing.
   void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

+  void dump_inlining();
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
   bool              select_24_bit_instr() const { return _select_24_bit_instr; }

@@ -40,19 +40,24 @@
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"

-void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
+void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
   if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+    outputStream* out = tty;
     if (!PrintInlining) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
       }
       CompileTask::print_inlining(prof_method, depth, bci);
+    } else {
+      out = C->print_inlining_stream();
     }
-    CompileTask::print_inline_indent(depth);
-    tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
-    prof_klass->name()->print_symbol();
-    tty->cr();
+    CompileTask::print_inline_indent(depth, out);
+    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
+    stringStream ss;
+    prof_klass->name()->print_symbol_on(&ss);
+    out->print(ss.as_string());
+    out->cr();
   }
 }

@@ -233,13 +238,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
           }
           if (miss_cg != NULL) {
             if (next_hit_cg != NULL) {
-              trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
+              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
               // We don't need to record dependency on a receiver here and below.
               // Whenever we inline, the dependency is added by Parse::Parse().
               miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
             }
             if (miss_cg != NULL) {
-              trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
+              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
               CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
               if (cg != NULL)  return cg;
             }

@@ -1771,11 +1771,21 @@ void GraphKit::replace_call(CallNode* call, Node* result) {
   CallProjections callprojs;
   call->extract_projections(&callprojs, true);

-  // Replace all the old call edges with the edges from the inlining result
-  C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
-  C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
-  C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
+  Node* init_mem = call->in(TypeFunc::Memory);
   Node* final_mem = final_state->in(TypeFunc::Memory);
+  Node* final_ctl = final_state->in(TypeFunc::Control);
+  Node* final_io  = final_state->in(TypeFunc::I_O);
+
+  // Replace all the old call edges with the edges from the inlining result
+  if (callprojs.fallthrough_catchproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
+  }
+  if (callprojs.fallthrough_memproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
+  }
+  if (callprojs.fallthrough_ioproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
+  }

   // Replace the result with the new result if it exists and is used
   if (callprojs.resproj != NULL && result != NULL) {

@@ -412,16 +412,16 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     break;

   case vmIntrinsics::_reverseBytes_c:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_s:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_i:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_l:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
     break;

   case vmIntrinsics::_Reference_get:

@@ -536,7 +536,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {

@@ -555,7 +555,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
-      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
+      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
     } else {
       // Root compile
       tty->print("Did not generate intrinsic %s%s at bci:%d in",

@@ -585,7 +585,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {

@@ -602,12 +602,12 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
-      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
+      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
     } else {
       // Root compile
-      tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
-                 vmIntrinsics::name_at(intrinsic_id()),
-                 (is_virtual() ? " (virtual)" : ""), bci);
+      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
+                                        vmIntrinsics::name_at(intrinsic_id()),
+                                        (is_virtual() ? " (virtual)" : ""), bci);
     }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);

@@ -3319,7 +3319,7 @@ bool LibraryCallKit::inline_native_subtype_check() {
     Node* arg = args[which_arg];
     arg = null_check(arg);
     if (stopped())  break;
-    args[which_arg] = _gvn.transform(arg);
+    args[which_arg] = arg;

     Node* p = basic_plus_adr(arg, class_klass_offset);
     Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);

@@ -744,7 +744,9 @@ bool StringConcat::validate_control_flow() {
       ctrl_path.push(cn);
       ctrl_path.push(cn->proj_out(0));
       ctrl_path.push(cn->proj_out(0)->unique_out());
-      ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+      if (cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0) != NULL) {
+        ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+      }
     } else {
       ShouldNotReachHere();
     }

@@ -762,6 +764,12 @@ bool StringConcat::validate_control_flow() {
     } else if (ptr->is_IfTrue()) {
       IfNode* iff = ptr->in(0)->as_If();
       BoolNode* b = iff->in(1)->isa_Bool();
+
+      if (b == NULL) {
+        fail = true;
+        break;
+      }
+
       Node* cmp  = b->in(1);
       Node* v1   = cmp->in(1);
       Node* v2   = cmp->in(2);

@@ -1408,71 +1416,76 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
                   Deoptimization::Action_make_not_entrant);
   }

-  // length now contains the number of characters needed for the
-  // char[] so create a new AllocateArray for the char[]
-  Node* char_array = NULL;
-  {
-    PreserveReexecuteState preexecs(&kit);
-    // The original jvms is for an allocation of either a String or
-    // StringBuffer so no stack adjustment is necessary for proper
-    // reexecution.  If we deoptimize in the slow path the bytecode
-    // will be reexecuted and the char[] allocation will be thrown away.
-    kit.jvms()->set_should_reexecute(true);
-    char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
-                               length, 1);
-  }
-
-  // Mark the allocation so that zeroing is skipped since the code
-  // below will overwrite the entire array
-  AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
-  char_alloc->maybe_set_complete(_gvn);
-
-  // Now copy the string representations into the final char[]
-  Node* start = __ intcon(0);
-  for (int argi = 0; argi < sc->num_arguments(); argi++) {
-    Node* arg = sc->argument(argi);
-    switch (sc->mode(argi)) {
-      case StringConcat::IntMode: {
-        Node* end = __ AddI(start, string_sizes->in(argi));
-        // getChars words backwards so pass the ending point as well as the start
-        int_getChars(kit, arg, char_array, start, end);
-        start = end;
-        break;
-      }
-      case StringConcat::StringNullCheckMode:
-      case StringConcat::StringMode: {
-        start = copy_string(kit, arg, char_array, start);
-        break;
-      }
-      case StringConcat::CharMode: {
-        __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
-                           arg, T_CHAR, char_adr_idx);
-        start = __ AddI(start, __ intcon(1));
-        break;
-      }
-      default:
-        ShouldNotReachHere();
-    }
-  }
-
-  // If we're not reusing an existing String allocation then allocate one here.
-  Node* result = sc->string_alloc();
-  if (result == NULL) {
-    PreserveReexecuteState preexecs(&kit);
-    // The original jvms is for an allocation of either a String or
-    // StringBuffer so no stack adjustment is necessary for proper
-    // reexecution.
-    kit.jvms()->set_should_reexecute(true);
-    result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
-  }
-
-  // Intialize the string
-  if (java_lang_String::has_offset_field()) {
-    kit.store_String_offset(kit.control(), result, __ intcon(0));
-    kit.store_String_length(kit.control(), result, length);
-  }
-  kit.store_String_value(kit.control(), result, char_array);
+  Node* result;
+  if (!kit.stopped()) {
+    // length now contains the number of characters needed for the
+    // char[] so create a new AllocateArray for the char[]
+    Node* char_array = NULL;
+    {
+      PreserveReexecuteState preexecs(&kit);
+      // The original jvms is for an allocation of either a String or
+      // StringBuffer so no stack adjustment is necessary for proper
+      // reexecution.  If we deoptimize in the slow path the bytecode
+      // will be reexecuted and the char[] allocation will be thrown away.
+      kit.jvms()->set_should_reexecute(true);
+      char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
+                                 length, 1);
+    }
+
+    // Mark the allocation so that zeroing is skipped since the code
+    // below will overwrite the entire array
+    AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
+    char_alloc->maybe_set_complete(_gvn);
+
+    // Now copy the string representations into the final char[]
+    Node* start = __ intcon(0);
+    for (int argi = 0; argi < sc->num_arguments(); argi++) {
+      Node* arg = sc->argument(argi);
+      switch (sc->mode(argi)) {
+        case StringConcat::IntMode: {
+          Node* end = __ AddI(start, string_sizes->in(argi));
+          // getChars words backwards so pass the ending point as well as the start
+          int_getChars(kit, arg, char_array, start, end);
+          start = end;
+          break;
+        }
+        case StringConcat::StringNullCheckMode:
+        case StringConcat::StringMode: {
+          start = copy_string(kit, arg, char_array, start);
+          break;
+        }
+        case StringConcat::CharMode: {
+          __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                             arg, T_CHAR, char_adr_idx);
+          start = __ AddI(start, __ intcon(1));
+          break;
+        }
+        default:
+          ShouldNotReachHere();
+      }
+    }
+
+    // If we're not reusing an existing String allocation then allocate one here.
+    result = sc->string_alloc();
+    if (result == NULL) {
+      PreserveReexecuteState preexecs(&kit);
+      // The original jvms is for an allocation of either a String or
+      // StringBuffer so no stack adjustment is necessary for proper
+      // reexecution.
+      kit.jvms()->set_should_reexecute(true);
+      result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
+    }
+
+    // Intialize the string
+    if (java_lang_String::has_offset_field()) {
+      kit.store_String_offset(kit.control(), result, __ intcon(0));
+      kit.store_String_length(kit.control(), result, length);
+    }
+    kit.store_String_value(kit.control(), result, char_array);
+  } else {
+    result = C->top();
+  }

   // hook up the outgoing control and result
   kit.replace_call(sc->end(), result);