Commit 2dcfb3bc authored by kvn

6667605: (Escape Analysis) inline java constructors when EA is on

Summary: Java constructors should be inlined to be able to scalar replace a new object
Reviewed-by: rasbold
Parent d7f4cf41
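Below, a minimal, hypothetical Java sketch of the motivation (the Point and Demo classes are illustrative, not part of the change): unless a constructor is inlined, the freshly allocated object is passed to the <init> call as the receiver, so escape analysis has to assume it escapes and the allocation cannot be scalar replaced.

// Illustrative only: with -XX:+DoEscapeAnalysis, the allocation in
// lengthSquared() can be replaced by the plain locals x and y, but
// only if Point.<init> is inlined; a real call to <init> would make
// the new object escape into the callee.
class Point {
    int x, y;
    Point(int x, int y) { this.x = x; this.y = y; }
}

class Demo {
    static int lengthSquared(int x, int y) {
        Point p = new Point(x, y);    // candidate for scalar replacement
        return p.x * p.x + p.y * p.y; // field reads become uses of locals
    }
}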
@@ -79,8 +79,20 @@ static void print_indent(int depth) {
   for (int i = depth; i != 0; --i) tty->print("  ");
 }
 
+static bool is_init_with_ea(ciMethod* callee_method,
+                            ciMethod* caller_method, Compile* C) {
+  // True when EA is ON and a java constructor is called or
+  // a super constructor is called from an inlined java constructor.
+  return DoEscapeAnalysis && EliminateAllocations &&
+         ( callee_method->is_initializer() ||
+           (caller_method->is_initializer() &&
+            caller_method != C->method() &&
+            caller_method->holder()->is_subclass_of(callee_method->holder()))
+         );
+}
+
 // positive filter: should send be inlined?  returns NULL, if yes, or rejection msg
-const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
+const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
@@ -97,7 +109,8 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
   int size = callee_method->code_size();
 
   // Check for too many throws (and not too huge)
-  if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) {
+  if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
+     size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
     if (PrintInlining && Verbose) {
       print_indent(inline_depth());
@@ -114,8 +127,12 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
   int invoke_count = method()->interpreter_invocation_count();
   assert( invoke_count != 0, "Require invokation count greater than zero");
   int freq = call_site_count/invoke_count;
+
   // bump the max size if the call is frequent
-  if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount)) {
+  if ((freq >= InlineFrequencyRatio) ||
+      (call_site_count >= InlineFrequencyCount) ||
+      is_init_with_ea(callee_method, caller_method, C)) {
+
     max_size = C->freq_inline_size();
     if (size <= max_size && TraceFrequencyInlining) {
       print_indent(inline_depth());
@@ -126,7 +143,8 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
     }
   } else {
     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
-    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode/4)
+    if (callee_method->has_compiled_code() &&
+        callee_method->instructions_size() > InlineSmallCode/4)
       return "already compiled into a medium method";
   }
   if (size > max_size) {
@@ -139,7 +157,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
 // negative filter: should send NOT be inlined?  returns NULL, ok to inline, or rejection msg
-const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* wci_result) const {
+const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
   // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
   if (!UseOldInlining) {
     const char* fail = NULL;
@@ -204,9 +222,23 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* w
   // use frequency-based objections only for non-trivial methods
   if (callee_method->code_size() <= MaxTrivialSize) return NULL;
-  if (UseInterpreter && !CompileTheWorld) { // don't use counts with -Xcomp or CTW
-    if (!callee_method->has_compiled_code() && !callee_method->was_executed_more_than(0)) return "never executed";
-    if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return "executed < MinInliningThreshold times";
+
+  // don't use counts with -Xcomp or CTW
+  if (UseInterpreter && !CompileTheWorld) {
+
+    if (!callee_method->has_compiled_code() &&
+        !callee_method->was_executed_more_than(0)) {
+      return "never executed";
+    }
+
+    if (is_init_with_ea(callee_method, caller_method, C)) {
+
+      // Escape Analysis: inline all executed constructors
+
+    } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
+                                                           CompileThreshold >> 1))) {
+      return "executed < MinInliningThreshold times";
+    }
   }
 
   if (callee_method->should_not_inline()) {
@@ -219,8 +251,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* w
 //-----------------------------try_to_inline-----------------------------------
 // return NULL if ok, reason for not inlining otherwise
 // Relocated from "InliningClosure::try_to_inline"
-const char* InlineTree::try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
-  ciMethod* caller_method = method();
+const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
 
   // Old algorithm had funny accumulating BC-size counters
   if (UseOldInlining && ClipInlining
@@ -229,25 +260,47 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, int caller_bci, c
   }
 
   const char *msg = NULL;
-  if ((msg = shouldInline(callee_method, caller_bci, profile, wci_result)) != NULL) return msg;
-  if ((msg = shouldNotInline(callee_method, wci_result)) != NULL) return msg;
+  if ((msg = shouldInline(callee_method, caller_method, caller_bci,
+                          profile, wci_result)) != NULL) {
+    return msg;
+  }
+  if ((msg = shouldNotInline(callee_method, caller_method,
+                             wci_result)) != NULL) {
+    return msg;
+  }
 
   bool is_accessor = InlineAccessors && callee_method->is_accessor();
 
   // suppress a few checks for accessors and trivial methods
   if (!is_accessor && callee_method->code_size() > MaxTrivialSize) {
+
     // don't inline into giant methods
-    if (C->unique() > (uint)NodeCountInliningCutoff) return "NodeCountInliningCutoff";
+    if (C->unique() > (uint)NodeCountInliningCutoff) {
+      return "NodeCountInliningCutoff";
+    }
+
+    if ((!UseInterpreter || CompileTheWorld) &&
+        is_init_with_ea(callee_method, caller_method, C)) {
 
-    // don't inline unreached call sites
-    if (profile.count() == 0) return "call site not reached";
+      // Escape Analysis stress testing when running Xcomp or CTW:
+      // inline constructors even if they are not reached.
+
+    } else if (profile.count() == 0) {
+      // don't inline unreached call sites
+      return "call site not reached";
+    }
   }
 
-  if (!C->do_inlining() && InlineAccessors && !is_accessor) return "not an accessor";
+  if (!C->do_inlining() && InlineAccessors && !is_accessor) {
+    return "not an accessor";
+  }
 
-  if( inline_depth() > MaxInlineLevel ) return "inlining too deep";
+  if( inline_depth() > MaxInlineLevel ) {
+    return "inlining too deep";
+  }
   if( method() == callee_method &&
-      inline_depth() > MaxRecursiveInlineLevel ) return "recursively inlining too deep";
+      inline_depth() > MaxRecursiveInlineLevel ) {
+    return "recursively inlining too deep";
+  }
 
   int size = callee_method->code_size();
@@ -336,7 +389,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
-  failure_msg = try_to_inline(callee_method, caller_bci, profile, &wci);
+  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
   if (failure_msg != NULL && C->log() != NULL) {
     C->log()->begin_elem("inline_fail reason='");
     C->log()->text("%s", failure_msg);
......
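An aside on the new is_init_with_ea() predicate above: its two arms can be pictured with a hypothetical constructor chain (classes A and B are illustrative, not from the changeset).

// Illustrative only. Suppose C2 compiles a method containing "new B()":
//  - B.<init> qualifies because callee_method->is_initializer() is true;
//  - once B.<init> is inlined, the super() call to A.<init> also
//    qualifies: caller_method (B.<init>) is an initializer, it is not
//    the root method being compiled, and B is a subclass of A.
// Both must be inlined, or 'this' escapes into the constructor call.
class A {
    int a;
    A() { a = 1; }
}

class B extends A {
    int b;
    B() { super(); b = 2; }
}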
@@ -54,9 +54,9 @@ protected:
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
-  const char* try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
-  const char* shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
-  const char* shouldNotInline(ciMethod* callee_method, WarmCallInfo* wci_result) const;
+  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
+  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
+  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
   void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN;
 
   InlineTree *caller_tree()       const { return _caller_tree;  }
......
@@ -648,79 +648,9 @@ ConNode* PhaseTransform::zerocon(BasicType bt) {
 //=============================================================================
 //------------------------------transform--------------------------------------
 // Return a node which computes the same function as this node, but in a
-// faster or cheaper fashion.  The Node passed in here must have no other
-// pointers to it, as its storage will be reclaimed if the Node can be
-// optimized away.
+// faster or cheaper fashion.
 Node *PhaseGVN::transform( Node *n ) {
-  NOT_PRODUCT( set_transforms(); )
-
-  // Apply the Ideal call in a loop until it no longer applies
-  Node *k = n;
-  NOT_PRODUCT( uint loop_count = 0; )
-  while( 1 ) {
-    Node *i = k->Ideal(this, /*can_reshape=*/false);
-    if( !i ) break;
-    assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
-    // Can never reclaim storage for Ideal calls, because the Ideal call
-    // returns a new Node, bumping the High Water Mark and our old Node
-    // is caught behind the new one.
-    //if( k != i ) {
-    //  k->destruct();            // Reclaim storage for recent node
-    k = i;
-    //}
-    assert(loop_count++ < K, "infinite loop in PhaseGVN::transform");
-  }
-  NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } )
-
-  // If brand new node, make space in type array.
-  ensure_type_or_null(k);
-
-  // Cache result of Value call since it can be expensive
-  // (abstract interpretation of node 'k' using phase->_types[ inputs ])
-  const Type *t = k->Value(this); // Get runtime Value set
-  assert(t != NULL, "value sanity");
-  if (type_or_null(k) != t) {
-#ifndef PRODUCT
-    // Do not record transformation or value construction on first visit
-    if (type_or_null(k) == NULL) {
-      inc_new_values();
-      set_progress();
-    }
-#endif
-    set_type(k, t);
-    // If k is a TypeNode, capture any more-precise type permanently into Node
-    k->raise_bottom_type(t);
-  }
-
-  if( t->singleton() && !k->is_Con() ) {
-    //k->destruct();              // Reclaim storage for recent node
-    NOT_PRODUCT( set_progress(); )
-    return makecon(t);          // Turn into a constant
-  }
-
-  // Now check for Identities
-  Node *i = k->Identity(this);  // Look for a nearby replacement
-  if( i != k ) {                // Found?  Return replacement!
-    //k->destruct();              // Reclaim storage for recent node
-    NOT_PRODUCT( set_progress(); )
-    return i;
-  }
-
-  // Try Global Value Numbering
-  i = hash_find_insert(k);      // Found older value when i != NULL
-  if( i && i != k ) {           // Hit? Return the old guy
-    NOT_PRODUCT( set_progress(); )
-    return i;
-  }
-
-  // Collect points-to information for escape analysys
-  ConnectionGraph *cgr = C->congraph();
-  if (cgr != NULL) {
-    cgr->record_escape(k, this);
-  }
-
-  // Return Idealized original
-  return k;
+  return transform_no_reclaim(n);
 }
 
 //------------------------------transform--------------------------------------
@@ -784,6 +714,12 @@ Node *PhaseGVN::transform_no_reclaim( Node *n ) {
     return i;
   }
 
+  // Collect points-to information for escape analysys
+  ConnectionGraph *cgr = C->congraph();
+  if (cgr != NULL) {
+    cgr->record_escape(k, this);
+  }
+
   // Return Idealized original
   return k;
 }
......
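Finally, a hedged sketch of how the whole change might be exercised; the test class, loop counts, and flag choice are assumptions for illustration, not part of the commit.

// Illustrative micro-test. Run with: java -server -XX:+DoEscapeAnalysis EATest
// (the new predicate also requires EliminateAllocations to be on).
// Once hot() is compiled by C2, Point.<init> should now be inlined and the
// allocation scalar replaced, so the loop performs no heap allocation.
public class EATest {
    static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static long hot(int n) {
        long sum = 0;
        for (int i = 0; i < n; i++) {
            Point p = new Point(i, i + 1); // eliminated once <init> is inlined
            sum += (long) p.x * p.y;
        }
        return sum;
    }

    public static void main(String[] args) {
        long total = 0;
        for (int i = 0; i < 20000; i++) { // warm up so hot() gets compiled
            total += hot(1000);
        }
        System.out.println(total);
    }
}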