Commit 751cd1f9 authored by roland

8024069: replace_in_map() should operate on parent maps

Summary: type information gets lost because replace_in_map() doesn't update parent maps
Reviewed-by: kvn, twisti
Parent 215e5b2c
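
The heart of the issue: C2 records narrowed type information (for example, a receiver proven non-null by a check emitted while parsing an inlinee) by rewriting the corresponding node in the parser's JVM-state map through GraphKit::replace_in_map(). Until now only the innermost map was rewritten, so the caller's map kept the older, less precise node and could repeat checks the inlinee had already performed. A minimal sketch of the call pattern involved, using the existing GraphKit API (illustration only, not part of the patch):

    // While parsing an inlinee, a null check narrows the receiver:
    Node* not_null_recv = kit.cast_not_null(recv, /*do_replace_in_map=*/ true);
    // cast_not_null() finishes with replace_in_map(recv, not_null_recv),
    // which before this change only rewrote the innermost (inlinee) map.
    // With -XX:+ReplaceInParentMaps the rewrite can also reach the exit
    // maps of calling Parse frames, so the improvement survives inlining.

The changes below add an experimental flag, thread a Parse* parent_parser argument through every CallGenerator::generate() implementation, and teach GraphKit::replace_in_map() to walk the chain of active parsers.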
......@@ -638,7 +638,10 @@
"Find best control for expensive operations") \
\
product(bool, UseMathExactIntrinsics, true, \
"Enables intrinsification of various java.lang.Math funcitons")
"Enables intrinsification of various java.lang.Math functions") \
\
experimental(bool, ReplaceInParentMaps, false, \
"Propagate type improvements in callers of inlinee if possible")
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
......
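The new flag gates the whole feature; replace_in_map() (graphKit.cpp hunk further down) bails out early when it is off. A condensed view of that guard, plus the conventional way to enable an experimental C2 flag (assumed, standard launcher syntax):

    // Condensed from the graphKit.cpp change below.
    void GraphKit::replace_in_map(Node* old, Node* neww) {
      if (old == neww)  return;
      map()->replace_edge(old, neww);
      if (!ReplaceInParentMaps)  return;   // experimental, false by default
      // ... otherwise walk the Parse chain and patch caller exit maps ...
    }
    // Enabling it: java -XX:+UnlockExperimentalVMOptions -XX:+ReplaceInParentMaps ...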
......@@ -63,12 +63,12 @@ public:
}
virtual bool is_parse() const { return true; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
int is_osr() { return _is_osr; }
};
JVMState* ParseGenerator::generate(JVMState* jvms) {
JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
Compile* C = Compile::current();
if (is_osr()) {
......@@ -80,7 +80,7 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
return NULL; // bailing out of the compile; do not try to parse
}
Parse parser(jvms, method(), _expected_uses);
Parse parser(jvms, method(), _expected_uses, parent_parser);
// Grab signature for matching/allocation
#ifdef ASSERT
if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
......@@ -119,12 +119,12 @@ class DirectCallGenerator : public CallGenerator {
_separate_io_proj(separate_io_proj)
{
}
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
CallStaticJavaNode* call_node() const { return _call_node; }
};
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
bool is_static = method()->is_static();
address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
......@@ -171,10 +171,10 @@ public:
vtable_index >= 0, "either invalid or usable");
}
virtual bool is_virtual() const { return true; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
Node* receiver = kit.argument(0);
......@@ -276,7 +276,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
// Convert the CallStaticJava into an inline
virtual void do_late_inline();
virtual JVMState* generate(JVMState* jvms) {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->print_inlining_skip(this);
......@@ -290,7 +290,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
// that the late inlining logic can distinguish between fall
// through and exceptional uses of the memory and io projections
// as is done for allocations and macro expansion.
return DirectCallGenerator::generate(jvms);
return DirectCallGenerator::generate(jvms, parent_parser);
}
virtual void print_inlining_late(const char* msg) {
......@@ -389,7 +389,7 @@ void LateInlineCallGenerator::do_late_inline() {
}
// Now perform the inling using the synthesized JVMState
JVMState* new_jvms = _inline_cg->generate(jvms);
JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
if (new_jvms == NULL) return; // no change
if (C->failing()) return;
......@@ -429,8 +429,8 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {
virtual bool is_mh_late_inline() const { return true; }
virtual JVMState* generate(JVMState* jvms) {
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
if (_input_not_const) {
// inlining won't be possible so no need to enqueue right now.
call_node()->set_generator(this);
......@@ -477,13 +477,13 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
LateInlineCallGenerator(method, inline_cg) {}
virtual JVMState* generate(JVMState* jvms) {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->print_inlining_skip(this);
C->add_string_late_inline(this);
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
return new_jvms;
}
};
......@@ -498,13 +498,13 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
LateInlineCallGenerator(method, inline_cg) {}
virtual JVMState* generate(JVMState* jvms) {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->print_inlining_skip(this);
C->add_boxing_late_inline(this);
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
return new_jvms;
}
};
......@@ -540,7 +540,7 @@ public:
virtual bool is_virtual() const { return _is_virtual; }
virtual bool is_deferred() const { return true; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
......@@ -550,12 +550,12 @@ CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
return new WarmCallGenerator(ci, if_cold, if_hot);
}
JVMState* WarmCallGenerator::generate(JVMState* jvms) {
JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
Compile* C = Compile::current();
if (C->log() != NULL) {
C->log()->elem("warm_call bci='%d'", jvms->bci());
}
jvms = _if_cold->generate(jvms);
jvms = _if_cold->generate(jvms, parent_parser);
if (jvms != NULL) {
Node* m = jvms->map()->control();
if (m->is_CatchProj()) m = m->in(0); else m = C->top();
......@@ -616,7 +616,7 @@ public:
virtual bool is_inline() const { return _if_hit->is_inline(); }
virtual bool is_deferred() const { return _if_hit->is_deferred(); }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
......@@ -628,7 +628,7 @@ CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
}
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
// We need an explicit receiver null_check before checking its type.
......@@ -656,7 +656,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
{ PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _if_missed->generate(kit.sync_jvms());
slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
......@@ -677,12 +677,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
kit.replace_in_map(receiver, exact_receiver);
// Make the hot call:
JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
if (new_jvms == NULL) {
// Inline failed, so make a direct call.
assert(_if_hit->is_inline(), "must have been a failed inline");
CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
new_jvms = cg->generate(kit.sync_jvms());
new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
}
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
......@@ -874,7 +874,7 @@ public:
virtual bool is_inlined() const { return true; }
virtual bool is_intrinsic() const { return true; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
......@@ -884,7 +884,7 @@ CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
}
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
......@@ -904,7 +904,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _cg->generate(kit.sync_jvms());
slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
......@@ -922,12 +922,12 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
}
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
if (new_jvms == NULL) {
// Intrinsic failed, so use slow code or make a direct call.
if (slow_map == NULL) {
CallGenerator* cg = CallGenerator::for_direct_call(method());
new_jvms = cg->generate(kit.sync_jvms());
new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
} else {
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
......@@ -997,7 +997,7 @@ public:
virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
virtual bool is_trap() const { return true; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
......@@ -1009,7 +1009,7 @@ CallGenerator::for_uncommon_trap(ciMethod* m,
}
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
GraphKit kit(jvms);
// Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
int nargs = method()->arg_size();
......
......@@ -31,6 +31,8 @@
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"
class Parse;
//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.
......@@ -108,7 +110,7 @@ class CallGenerator : public ResourceObj {
//
// If the result is NULL, it means that this CallGenerator was unable
// to handle the given call, and another CallGenerator should be consulted.
virtual JVMState* generate(JVMState* jvms) = 0;
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
// How to generate a call site that is inlined:
static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
......
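Every generator, concrete or delegating, now receives the Parse instance of the caller and is expected to forward it to any nested generate() call (NULL is passed where no parser is active, as in the late-inline path and the top-level compile above). A hypothetical delegating subclass following the updated contract, for illustration only:

    // Hypothetical example, not part of the patch: a generator that simply
    // delegates and forwards the caller's parser, mirroring the pattern of
    // the LateInline/Predicted generators above.
    class ForwardingCallGenerator : public CallGenerator {
      CallGenerator* _delegate;
     public:
      ForwardingCallGenerator(ciMethod* m, CallGenerator* delegate)
        : CallGenerator(m), _delegate(delegate) {}
      virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
        // Forward parent_parser so a replace_in_map() performed inside the
        // delegate can also update the maps of enclosing callers.
        return _delegate->generate(jvms, parent_parser);
      }
    };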
......@@ -655,7 +655,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
_print_inlining_idx(0) {
_print_inlining_idx(0),
_preserve_jvm_state(0) {
C = this;
CompileWrapper cw(this);
......@@ -763,7 +764,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
return;
}
JVMState* jvms = build_start_state(start(), tf());
if ((jvms = cg->generate(jvms)) == NULL) {
if ((jvms = cg->generate(jvms, NULL)) == NULL) {
record_method_not_compilable("method parse failed");
return;
}
......@@ -940,7 +941,8 @@ Compile::Compile( ciEnv* ci_env,
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
_print_inlining_idx(0) {
_print_inlining_idx(0),
_preserve_jvm_state(0) {
C = this;
#ifndef PRODUCT
......
......@@ -425,6 +425,9 @@ class Compile : public Phase {
// Expensive nodes list already sorted?
bool expensive_nodes_sorted() const;
// Are we within a PreserveJVMState block?
int _preserve_jvm_state;
public:
outputStream* print_inlining_stream() const {
......@@ -820,7 +823,9 @@ class Compile : public Phase {
// Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true,
bool delayed_forbidden = false);
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return should_delay_string_inlining(call_method, jvms) ||
should_delay_boxing_inlining(call_method, jvms);
......@@ -1156,6 +1161,21 @@ class Compile : public Phase {
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
// enter a PreserveJVMState block
void inc_preserve_jvm_state() {
_preserve_jvm_state++;
}
// exit a PreserveJVMState block
void dec_preserve_jvm_state() {
_preserve_jvm_state--;
assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
}
bool has_preserve_jvm_state() const {
return _preserve_jvm_state > 0;
}
};
#endif // SHARE_VM_OPTO_COMPILE_HPP
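The counter exists because PreserveJVMState only shallow-clones the current map (see the graphKit.cpp hunk below), so while any such block is active it is not safe to push changes into caller maps. A sketch of how the RAII pairing keeps the counter balanced (assumed usage):

    // Sketch: PreserveJVMState bumps the counter in its constructor and
    // drops it in its destructor, so replace_in_map() can detect nesting.
    {
      PreserveJVMState pjvms(&kit);   // inc_preserve_jvm_state()
      // ... speculative work on a cloned map; parent maps must not be touched ...
      assert(Compile::current()->has_preserve_jvm_state(), "inside a PreserveJVMState block");
    }                                 // ~PreserveJVMState(): dec_preserve_jvm_state()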
......@@ -495,7 +495,7 @@ void Parse::do_call() {
// because exceptions don't return to the call site.)
profile_call(receiver);
JVMState* new_jvms = cg->generate(jvms);
JVMState* new_jvms = cg->generate(jvms, this);
if (new_jvms == NULL) {
// When inlining attempt fails (e.g., too many arguments),
// it may contaminate the current compile state, making it
......@@ -509,7 +509,7 @@ void Parse::do_call() {
// intrinsic was expecting to optimize. Should always be possible to
// get a normal java call that may inline in that case
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
if ((new_jvms = cg->generate(jvms)) == NULL) {
if ((new_jvms = cg->generate(jvms, this)) == NULL) {
guarantee(failing(), "call failed to generate: calls should work");
return;
}
......
......@@ -639,6 +639,7 @@ PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
_map = kit->map(); // preserve the map
_sp = kit->sp();
kit->set_map(clone_map ? kit->clone_map() : NULL);
Compile::current()->inc_preserve_jvm_state();
#ifdef ASSERT
_bci = kit->bci();
Parse* parser = kit->is_Parse();
......@@ -656,6 +657,7 @@ PreserveJVMState::~PreserveJVMState() {
#endif
kit->set_map(_map);
kit->set_sp(_sp);
Compile::current()->dec_preserve_jvm_state();
}
......@@ -1373,17 +1375,70 @@ Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
this->map()->replace_edge(old, neww);
if (old == neww) {
return;
}
map()->replace_edge(old, neww);
// Note: This operation potentially replaces any edge
// on the map. This includes locals, stack, and monitors
// of the current (innermost) JVM state.
// We can consider replacing in caller maps.
// The idea would be that an inlined function's null checks
// can be shared with the entire inlining tree.
// The expense of doing this is that the PreserveJVMState class
// would have to preserve caller states too, with a deep copy.
if (!ReplaceInParentMaps) {
return;
}
// PreserveJVMState doesn't do a deep copy so we can't modify
// parents
if (Compile::current()->has_preserve_jvm_state()) {
return;
}
Parse* parser = is_Parse();
bool progress = true;
Node* ctrl = map()->in(0);
// Follow the chain of parsers and see whether the update can be
// done in the map of callers. We can do the replace for a caller if
// the current control post dominates the control of a caller.
while (parser != NULL && parser->caller() != NULL && progress) {
progress = false;
Node* parent_map = parser->caller()->map();
assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
Node* parent_ctrl = parent_map->in(0);
while (parent_ctrl->is_Region()) {
Node* n = parent_ctrl->as_Region()->is_copy();
if (n == NULL) {
break;
}
parent_ctrl = n;
}
for (;;) {
if (ctrl == parent_ctrl) {
// update the map of the exits which is the one that will be
// used when compilation resume after inlining
parser->exits().map()->replace_edge(old, neww);
progress = true;
break;
}
if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
ctrl = ctrl->in(0)->in(0);
} else if (ctrl->is_Region()) {
Node* n = ctrl->as_Region()->is_copy();
if (n == NULL) {
break;
}
ctrl = n;
} else {
break;
}
}
parser = parser->parent_parser();
}
}
......
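One shape the walk above is meant to recognize (a sketch, not from the patch): the inlinee's current control hangs off an uncommon-trap guard, and the caller's map control is a Region that is a trivial copy; skipping both lets ctrl reach parent_ctrl, at which point the caller's exit map is patched.

    // The walk climbs from the inlinee's current control towards the caller:
    //
    //   parent_ctrl  (caller map control; trivial copy Regions are unwrapped)
    //        |
    //      If guard -----> other_proj -> ... -> uncommon trap
    //        |
    //      ctrl  (proj taken by the inlinee; is_uncommon_trap_if_pattern()
    //             lets the walk step past the guard towards parent_ctrl)
    //
    // Once ctrl == parent_ctrl the caller's exit map gets the improved node:
    //   parser->exits().map()->replace_edge(old, neww);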
......@@ -1019,7 +1019,7 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
// be skipped. For example, range check predicate has two checks
// for lower and upper bounds.
ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
prev_dom = idom;
// Now walk the current IfNode's projections.
......
......@@ -63,7 +63,7 @@ class LibraryIntrinsic : public InlineCallGenerator {
virtual bool is_virtual() const { return _is_virtual; }
virtual bool is_predicted() const { return _is_predicted; }
virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
virtual JVMState* generate(JVMState* jvms);
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
virtual Node* generate_predicate(JVMState* jvms);
vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};
......@@ -556,7 +556,7 @@ void Compile::register_library_intrinsics() {
// Nothing to do here.
}
JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
LibraryCallKit kit(jvms, this);
Compile* C = kit.C;
int nodes = C->unique();
......
......@@ -41,63 +41,6 @@
* checks (such as null checks).
*/
//-------------------------------is_uncommon_trap_proj----------------------------
// Return true if proj is the form of "proj->[region->..]call_uct"
bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
int path_limit = 10;
assert(proj, "invalid argument");
Node* out = proj;
for (int ct = 0; ct < path_limit; ct++) {
out = out->unique_ctrl_out();
if (out == NULL)
return false;
if (out->is_CallStaticJava()) {
int req = out->as_CallStaticJava()->uncommon_trap_request();
if (req != 0) {
Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
if (trap_reason == reason || reason == Deoptimization::Reason_none) {
return true;
}
}
return false; // don't do further after call
}
if (out->Opcode() != Op_Region)
return false;
}
return false;
}
//-------------------------------is_uncommon_trap_if_pattern-------------------------
// Return true for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
//
// "must_reason_predicate" means the uct reason must be Reason_predicate
bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
Node *in0 = proj->in(0);
if (!in0->is_If()) return false;
// Variation of a dead If node.
if (in0->outcnt() < 2) return false;
IfNode* iff = in0->as_If();
// we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
if (reason != Deoptimization::Reason_none) {
if (iff->in(1)->Opcode() != Op_Conv2B ||
iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
return false;
}
}
ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
if (is_uncommon_trap_proj(other_proj, reason)) {
assert(reason == Deoptimization::Reason_none ||
Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
return true;
}
return false;
}
//-------------------------------register_control-------------------------
void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
assert(n->is_CFG(), "must be control node");
......@@ -147,7 +90,7 @@ void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred)
// This code is also used to clone predicates to clonned loops.
ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason) {
assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
IfNode* iff = cont_proj->in(0)->as_If();
ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
......@@ -235,7 +178,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason) {
assert(new_entry != 0, "only used for clone predicate");
assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
IfNode* iff = cont_proj->in(0)->as_If();
ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
......@@ -422,7 +365,7 @@ Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
if (start_c == NULL || !start_c->is_Proj())
return NULL;
if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
return start_c->as_Proj();
}
return NULL;
......@@ -773,7 +716,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
ProjNode* proj = if_proj_list.pop()->as_Proj();
IfNode* iff = proj->in(0)->as_If();
if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
if (!proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
if (loop->is_loop_exit(iff)) {
// stop processing the remaining projs in the list because the execution of them
// depends on the condition of "iff" (iff->in(1)).
......
......@@ -167,7 +167,7 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
// expensive nodes will notice the loop and skip over it to try to
// move the node further up.
if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) {
if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
break;
}
next = idom(ctl->in(1)->in(0));
......@@ -181,7 +181,7 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
} else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
} else if (parent_ctl->is_If()) {
if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) {
if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
break;
}
assert(idom(ctl) == parent_ctl, "strange");
......
......@@ -876,13 +876,6 @@ public:
// Return true if exp is a scaled induction var plus (or minus) constant
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
// Return true if proj is for "proj->[region->..]call_uct"
static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
// Return true for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
// Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason);
......
......@@ -238,7 +238,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
ProjNode* dp_proj = dp->as_Proj();
ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
if (exclude_loop_predicate &&
is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
return; // Let IGVN transformation change control dependence.
IdealLoopTree *old_loop = get_loop(dp);
......
......@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/multnode.hpp"
......@@ -150,3 +151,59 @@ const RegMask &ProjNode::out_RegMask() const {
uint ProjNode::ideal_reg() const {
return bottom_type()->ideal_reg();
}
//-------------------------------is_uncommon_trap_proj----------------------------
// Return true if proj is the form of "proj->[region->..]call_uct"
bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
int path_limit = 10;
Node* out = this;
for (int ct = 0; ct < path_limit; ct++) {
out = out->unique_ctrl_out();
if (out == NULL)
return false;
if (out->is_CallStaticJava()) {
int req = out->as_CallStaticJava()->uncommon_trap_request();
if (req != 0) {
Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
if (trap_reason == reason || reason == Deoptimization::Reason_none) {
return true;
}
}
return false; // don't do further after call
}
if (out->Opcode() != Op_Region)
return false;
}
return false;
}
//-------------------------------is_uncommon_trap_if_pattern-------------------------
// Return true for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
//
// "must_reason_predicate" means the uct reason must be Reason_predicate
bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
Node *in0 = in(0);
if (!in0->is_If()) return false;
// Variation of a dead If node.
if (in0->outcnt() < 2) return false;
IfNode* iff = in0->as_If();
// we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
if (reason != Deoptimization::Reason_none) {
if (iff->in(1)->Opcode() != Op_Conv2B ||
iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
return false;
}
}
ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj();
if (other_proj->is_uncommon_trap_proj(reason)) {
assert(reason == Deoptimization::Reason_none ||
Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
return true;
}
return false;
}
......@@ -88,6 +88,14 @@ public:
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
// Return true if proj is for "proj->[region->..]call_uct"
bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
// Return true for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
};
#endif // SHARE_VM_OPTO_MULTNODE_HPP
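Moving these queries from PhaseIdealLoop onto ProjNode makes them usable outside the loop optimizer, which is what allows GraphKit::replace_in_map() (above) to recognize uncommon-trap guards while walking up the control chain. Call sites change from the static helper to an instance call; a sketch of the before/after at the ifnode.cpp site:

    // Before: static helper, only reachable from loop opts
    //   if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate)) ...
    // After: instance query on the projection itself
    if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) {
      // unc_proj reaches an uncommon-trap call (possibly through regions)
    }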
......@@ -349,13 +349,15 @@ class Parse : public GraphKit {
int _est_switch_depth; // Debugging SwitchRanges.
#endif
// parser for the caller of the method of this object
Parse* const _parent;
public:
// Constructor
Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);
virtual Parse* is_Parse() const { return (Parse*)this; }
public:
// Accessors.
JVMState* caller() const { return _caller; }
float expected_uses() const { return _expected_uses; }
......@@ -407,6 +409,8 @@ class Parse : public GraphKit {
return block()->successor_for_bci(bci);
}
Parse* parent_parser() const { return _parent; }
private:
// Create a JVMS & map for the initial state of this method.
SafePointNode* create_entry_map();
......
......@@ -381,8 +381,8 @@ void Parse::load_interpreter_state(Node* osr_buf) {
//------------------------------Parse------------------------------------------
// Main parser constructor.
Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
: _exits(caller)
Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
: _exits(caller), _parent(parent)
{
// Init some variables
_caller = caller;
......