Commit 7689f3c9 authored by twisti

7079673: JSR 292: C1 should inline bytecoded method handle adapters

Reviewed-by: never
Parent 2ceba375
@@ -28,8 +28,10 @@
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMethodHandle.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1424,7 +1426,7 @@ void GraphBuilder::method_return(Value x) {
// See whether this is the first return; if so, store off some
// of the state for later examination
if (num_returns() == 0) {
-    set_inline_cleanup_info(_block, _last, state());
+    set_inline_cleanup_info();
}
// The current bci() is in the wrong scope, so use the bci() of
@@ -1582,6 +1584,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = Bytecodes::_invokespecial;
}
+  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
// NEEDS_CLEANUP
// I've added the target-is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
@@ -1693,26 +1697,31 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
&& target->will_link(klass, callee_holder, code)) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
-      if (code == Bytecodes::_invokestatic
-          || code == Bytecodes::_invokespecial
-          || code == Bytecodes::_invokevirtual && target->is_final_method()
-          ) {
-        // static binding => check if callee is ok
-        ciMethod* inline_target = (cha_monomorphic_target != NULL)
-                                    ? cha_monomorphic_target
-                                    : target;
-        bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+      if (code == Bytecodes::_invokestatic  ||
+          code == Bytecodes::_invokespecial ||
+          code == Bytecodes::_invokevirtual && target->is_final_method() ||
+          code == Bytecodes::_invokedynamic) {
+        ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
+        bool success = false;
+        if (target->is_method_handle_invoke()) {
+          // method handle invokes
+          success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
+        }
+        if (!success) {
+          // static binding => check if callee is ok
+          success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+        }
CHECK_BAILOUT();
#ifndef PRODUCT
// printing
-        if (PrintInlining && !res) {
+        if (PrintInlining && !success) {
          // if it was successfully inlined, then it was already printed.
-          print_inline_result(inline_target, res);
+          print_inline_result(inline_target, success);
        }
#endif
        clear_inline_bailout();
-        if (res) {
+        if (success) {
// Register dependence if JVMTI has either breakpoint
// setting or hotswapping of methods capabilities since they may
// cause deoptimization.
@@ -1740,7 +1749,6 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
-  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
ValueType* result_type = as_ValueType(target->return_type());
// We require the debug info to be the "state before" because
@@ -3038,7 +3046,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
INLINE_BAILOUT("disallowed by CompilerOracle")
} else if (!callee->can_be_compiled()) {
// callee is not compilable (prob. has breakpoints)
INLINE_BAILOUT("not compilable")
INLINE_BAILOUT("not compilable (disabled)")
} else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
// intrinsics can be native or not
return true;
@@ -3397,7 +3405,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}
-bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
+bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@@ -3468,7 +3476,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// Insert null check if necessary
Value recv = NULL;
-  if (code() != Bytecodes::_invokestatic) {
+  if (code() != Bytecodes::_invokestatic &&
+      code() != Bytecodes::_invokedynamic) {
// note: null check must happen even if first instruction of callee does
// an implicit null check since the callee is in a different scope
// and we must make sure exception handling does the right thing
@@ -3496,7 +3505,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// fall-through of control flow, all return instructions of the
// callee will need to be replaced by Goto's pointing to this
// continuation point.
-  BlockBegin* cont = block_at(next_bci());
+  BlockBegin* cont = cont_block != NULL ? cont_block : block_at(next_bci());
bool continuation_existed = true;
if (cont == NULL) {
cont = new BlockBegin(next_bci());
@@ -3608,27 +3617,29 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// block merging. This allows load elimination and CSE to take place
// across multiple callee scopes if they are relatively simple, and
// is currently essential to making inlining profitable.
-  if ( num_returns() == 1
-      && block() == orig_block
-      && block() == inline_cleanup_block()) {
-    _last = inline_cleanup_return_prev();
-    _state = inline_cleanup_state();
-  } else if (continuation_preds == cont->number_of_preds()) {
-    // Inlining caused that the instructions after the invoke in the
-    // caller are not reachable any more. So skip filling this block
-    // with instructions!
-    assert (cont == continuation(), "");
-    assert(_last && _last->as_BlockEnd(), "");
-    _skip_block = true;
-  } else {
-    // Resume parsing in continuation block unless it was already parsed.
-    // Note that if we don't change _last here, iteration in
-    // iterate_bytecodes_for_block will stop when we return.
-    if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
-      // add continuation to work list instead of parsing it immediately
-      assert(_last && _last->as_BlockEnd(), "");
-      scope_data()->parent()->add_to_work_list(continuation());
-      _skip_block = true;
-    }
-  }
+  if (cont_block == NULL) {
+    if (num_returns() == 1
+        && block() == orig_block
+        && block() == inline_cleanup_block()) {
+      _last  = inline_cleanup_return_prev();
+      _state = inline_cleanup_state();
+    } else if (continuation_preds == cont->number_of_preds()) {
+      // Inlining caused that the instructions after the invoke in the
+      // caller are not reachable any more. So skip filling this block
+      // with instructions!
+      assert(cont == continuation(), "");
+      assert(_last && _last->as_BlockEnd(), "");
+      _skip_block = true;
+    } else {
+      // Resume parsing in continuation block unless it was already parsed.
+      // Note that if we don't change _last here, iteration in
+      // iterate_bytecodes_for_block will stop when we return.
+      if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
+        // add continuation to work list instead of parsing it immediately
+        assert(_last && _last->as_BlockEnd(), "");
+        scope_data()->parent()->add_to_work_list(continuation());
+        _skip_block = true;
+      }
+    }
+  }
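The new `cont_block == NULL` guard means the three pre-existing continuation strategies (folding a single return into the caller, skipping a now-unreachable continuation, or queuing the continuation on the work list) only run when the caller did not supply its own continuation; the method-handle diamond inlining below passes `cont_block` and wires the continuation itself via `connect_to_end`. A stand-alone distillation of that decision, with invented names (`AfterInline`, `after_inline`), not HotSpot code:

```cpp
#include <cstdio>

enum AfterInline {
  FOLD_RETURN_INTO_CALLER,    // single return: splice callee result into caller block
  SKIP_UNREACHABLE_BLOCK,     // continuation gained no predecessors: it is dead
  PARSE_CONTINUATION_LATER,   // push continuation onto the parent's work list
  CALLER_WIRES_CONTINUATION   // cont_block supplied: caller calls connect_to_end()
};

AfterInline after_inline(bool caller_supplied_cont, int num_returns,
                         bool in_orig_cleanup_block, bool cont_preds_unchanged) {
  if (caller_supplied_cont)                      return CALLER_WIRES_CONTINUATION;
  if (num_returns == 1 && in_orig_cleanup_block) return FOLD_RETURN_INTO_CALLER;
  if (cont_preds_unchanged)                      return SKIP_UNREACHABLE_BLOCK;
  return PARSE_CONTINUATION_LATER;
}

int main() {
  // Diamond inlining always bypasses the three legacy cases.
  printf("%d\n", after_inline(true, 1, true, false));
  return 0;
}
```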
@@ -3645,6 +3656,120 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
}
+bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
+  assert(!callee->is_static(), "change next line");
+  int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1);
+  Value receiver = state()->stack_at(index);
+
+  if (receiver->type()->is_constant()) {
+    ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();
+
+    // Set the callee to have access to the class and signature in
+    // the MethodHandleCompiler.
+    method_handle->set_callee(callee);
+    method_handle->set_caller(method());
+
+    // Get an adapter for the MethodHandle.
+    ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter();
+    if (method_handle_adapter != NULL) {
+      return try_inline(method_handle_adapter, /*holder_known=*/ true);
+    }
+  } else if (receiver->as_CheckCast()) {
+    // Match MethodHandle.selectAlternative idiom
+    Phi* phi = receiver->as_CheckCast()->obj()->as_Phi();
+    if (phi != NULL && phi->operand_count() == 2) {
+      // Get the two MethodHandle inputs from the Phi.
+      Value op1 = phi->operand_at(0);
+      Value op2 = phi->operand_at(1);
+      ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
+      ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
+
+      // Set the callee to have access to the class and signature in
+      // the MethodHandleCompiler.
+      mh1->set_callee(callee);
+      mh1->set_caller(method());
+      mh2->set_callee(callee);
+      mh2->set_caller(method());
+
+      // Get adapters for the MethodHandles.
+      ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
+      ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
+      if (mh1_adapter != NULL && mh2_adapter != NULL) {
+        set_inline_cleanup_info();
+
+        // Build the If guard
+        BlockBegin* one = new BlockBegin(next_bci());
+        BlockBegin* two = new BlockBegin(next_bci());
+        BlockBegin* end = new BlockBegin(next_bci());
+        Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
+        block()->set_end(iff->as_BlockEnd());
+
+        // Connect up the states
+        one->merge(block()->end()->state());
+        two->merge(block()->end()->state());
+
+        // Save the state for the second inlinee
+        ValueStack* state_before = copy_state_before();
+
+        // Parse first adapter
+        _last = _block = one;
+        if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
+          restore_inline_cleanup_info();
+          block()->clear_end();  // remove appended iff
+          return false;
+        }
+
+        // Parse second adapter
+        _last = _block = two;
+        _state = state_before;
+        if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
+          restore_inline_cleanup_info();
+          block()->clear_end();  // remove appended iff
+          return false;
+        }
+
+        connect_to_end(end);
+        return true;
+      }
+    }
+  }
+  return false;
+}
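The `selectAlternative` case above pattern-matches the IR shape that a guard-with-test method handle lowers to, a `CheckCast` applied to a two-input `Phi` of constant method handles, and then rebuilds the guard as an explicit `If` diamond so both handles can be inlined. A minimal sketch of just the matching step, using invented toy classes (`Node`, `ConstantMH`, `Phi`, `CheckCast` here are stand-ins, not C1's real IR types):

```cpp
#include <cstdio>

// Toy IR stand-ins; C1's real Value/Phi/CheckCast classes are richer.
struct Node { virtual ~Node() {} };
struct ConstantMH : Node {             // a constant MethodHandle value
  const char* name;
  explicit ConstantMH(const char* n) : name(n) {}
};
struct Phi : Node { Node* in[2]; };    // two-way merge
struct CheckCast : Node { Node* obj; };

// Succeeds only on the exact shape selectAlternative produces:
// CheckCast(Phi(const MH, const MH)).
bool match_select_alternative(Node* receiver, ConstantMH*& mh1, ConstantMH*& mh2) {
  CheckCast* cc = dynamic_cast<CheckCast*>(receiver);
  if (cc == NULL) return false;
  Phi* phi = dynamic_cast<Phi*>(cc->obj);
  if (phi == NULL) return false;
  mh1 = dynamic_cast<ConstantMH*>(phi->in[0]);
  mh2 = dynamic_cast<ConstantMH*>(phi->in[1]);
  return mh1 != NULL && mh2 != NULL;
}

int main() {
  ConstantMH target("target"), fallback("fallback");
  Phi phi;        phi.in[0] = &target; phi.in[1] = &fallback;
  CheckCast cast; cast.obj = &phi;
  ConstantMH *m1, *m2;
  if (match_select_alternative(&cast, m1, m2))
    printf("matched: %s / %s\n", m1->name, m2->name);
  return 0;
}
```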
+bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
+  // Get the MethodHandle from the CallSite.
+  ciCallSite*     call_site     = stream()->get_call_site();
+  ciMethodHandle* method_handle = call_site->get_target();
+
+  // Inline constant and mutable call sites.  We don't inline
+  // volatile call sites optimistically since they are specified
+  // to change their value often and that would result in a lot of
+  // deoptimizations and recompiles.
+  if (call_site->is_constant_call_site() || call_site->is_mutable_call_site()) {
+    // Set the callee to have access to the class and signature in the
+    // MethodHandleCompiler.
+    method_handle->set_callee(callee);
+    method_handle->set_caller(method());
+
+    // Get an adapter for the MethodHandle.
+    ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter();
+    if (method_handle_adapter != NULL) {
+      if (try_inline(method_handle_adapter, /*holder_known=*/ true)) {
+        // Add a dependence for invalidation of the optimization.
+        if (!call_site->is_constant_call_site()) {
+          dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
+        }
+        return true;
+      }
+    }
+  }
+  return false;
+}
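The comment above encodes a three-way policy per CallSite flavor: a ConstantCallSite target can never change, so it is inlined with no dependency; a MutableCallSite is inlined optimistically with an `assert_call_site_target_value` dependency so a later `setTarget` deoptimizes the compiled code; a VolatileCallSite is never inlined. A compilable toy restatement of that policy (invented `CallSiteKind`/`Decision` names, assuming this reading):

```cpp
#include <cstdio>

enum CallSiteKind { CONSTANT_CS, MUTABLE_CS, VOLATILE_CS };

struct Decision { bool inline_target; bool record_dependency; };

Decision decide(CallSiteKind kind) {
  switch (kind) {
    case CONSTANT_CS: return Decision{true,  false};  // target can never change
    case MUTABLE_CS:  return Decision{true,  true};   // deopt if setTarget is ever called
    case VOLATILE_CS: return Decision{false, false};  // changes often: don't speculate
  }
  return Decision{false, false};                      // unreachable
}

int main() {
  Decision d = decide(MUTABLE_CS);
  printf("inline=%d dependency=%d\n", d.inline_target, d.record_dependency);
  return 0;
}
```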
void GraphBuilder::inline_bailout(const char* msg) {
assert(msg != NULL, "inline bailout msg must exist");
_inline_bailout_msg = msg;
......
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -315,9 +315,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
return_prev,
return_state); }
+  void set_inline_cleanup_info() {
+    set_inline_cleanup_info(_block, _last, _state);
+  }
BlockBegin* inline_cleanup_block() const { return scope_data()->inline_cleanup_block(); }
Instruction* inline_cleanup_return_prev() const { return scope_data()->inline_cleanup_return_prev(); }
ValueStack* inline_cleanup_state() const { return scope_data()->inline_cleanup_state(); }
+  void restore_inline_cleanup_info() {
+    _block = inline_cleanup_block();
+    _last  = inline_cleanup_return_prev();
+    _state = inline_cleanup_state();
+  }
void incr_num_returns() { scope_data()->incr_num_returns(); }
int num_returns() const { return scope_data()->num_returns(); }
intx max_inline_size() const { return scope_data()->max_inline_size(); }
@@ -329,11 +337,15 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
// inliners
-  bool try_inline(ciMethod* callee, bool holder_known);
+  bool try_inline(           ciMethod* callee, bool holder_known);
   bool try_inline_intrinsics(ciMethod* callee);
-  bool try_inline_full      (ciMethod* callee, bool holder_known);
+  bool try_inline_full(      ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
bool try_inline_jsr(int jsr_dest_bci);
+  // JSR 292 support
+  bool for_method_handle_inline(ciMethod* callee);
+  bool for_invokedynamic_inline(ciMethod* callee);
// helpers
void inline_bailout(const char* msg);
BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
......
@@ -514,28 +514,17 @@ Constant::CompareResult Constant::compare(Instruction::Condition cond, Value rig
void BlockBegin::set_end(BlockEnd* end) {
assert(end != NULL, "should not reset block end to NULL");
-  BlockEnd* old_end = _end;
-  if (end == old_end) {
+  if (end == _end) {
return;
}
-  // Must make the predecessors/successors match up with the
-  // BlockEnd's notion.
-  int i, n;
-  if (old_end != NULL) {
-    // disconnect from the old end
-    old_end->set_begin(NULL);
+  clear_end();
-    // disconnect this block from it's current successors
-    for (i = 0; i < _successors.length(); i++) {
-      _successors.at(i)->remove_predecessor(this);
-    }
-  }
// Set the new end
_end = end;
_successors.clear();
// Now reset successors list based on BlockEnd
-  n = end->number_of_sux();
-  for (i = 0; i < n; i++) {
+  for (int i = 0; i < end->number_of_sux(); i++) {
BlockBegin* sux = end->sux_at(i);
_successors.append(sux);
sux->_predecessors.append(this);
@@ -544,6 +533,22 @@ void BlockBegin::set_end(BlockEnd* end) {
}
+void BlockBegin::clear_end() {
+  // Must make the predecessors/successors match up with the
+  // BlockEnd's notion.
+  if (_end != NULL) {
+    // disconnect from the old end
+    _end->set_begin(NULL);
+
+    // disconnect this block from its current successors
+    for (int i = 0; i < _successors.length(); i++) {
+      _successors.at(i)->remove_predecessor(this);
+    }
+    _end = NULL;
+  }
+}
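Factoring `clear_end()` out of `set_end()` lets `for_method_handle_inline` undo a speculatively appended `BlockEnd` while preserving the CFG invariant that a block's successor list, and each successor's predecessor list, always mirror the current `BlockEnd`. A self-contained toy model of that invariant (simplified `Block` type, not C1's):

```cpp
#include <vector>
#include <algorithm>
#include <cassert>

struct Block {
  std::vector<Block*> preds, succs;
  Block* end_target;           // stand-in for the BlockEnd's successor

  Block() : end_target(NULL) {}

  void clear_end() {
    if (end_target != NULL) {
      // unlink this block from every successor's predecessor list
      for (size_t i = 0; i < succs.size(); i++) {
        std::vector<Block*>& p = succs[i]->preds;
        p.erase(std::find(p.begin(), p.end(), this));
      }
      succs.clear();
      end_target = NULL;
    }
  }
  void set_end(Block* target) {
    if (target == end_target) return;
    clear_end();               // drop stale edges before wiring new ones
    end_target = target;
    succs.push_back(target);
    target->preds.push_back(this);
  }
};

int main() {
  Block a, b, c;
  a.set_end(&b);
  a.set_end(&c);               // re-pointing the end must unlink b
  assert(b.preds.empty());
  assert(c.preds.size() == 1 && c.preds[0] == &a);
  return 0;
}
```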
void BlockBegin::disconnect_edge(BlockBegin* from, BlockBegin* to) {
// disconnect any edges between from and to
#ifndef PRODUCT
......
@@ -1601,6 +1601,7 @@ LEAF(BlockBegin, StateSplit)
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; }
void set_end(BlockEnd* end);
+  void clear_end();
void disconnect_from_graph();
static void disconnect_edge(BlockBegin* from, BlockBegin* to);
BlockBegin* insert_block_between(BlockBegin* sux);
......
@@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -2674,6 +2675,17 @@ void java_lang_invoke_CallSite::compute_offsets() {
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
}
+  // Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
+  // (For C2:  keep this until we have throttling logic for uncommon traps.)
+  if (k != NULL) {
+    instanceKlass* ik = instanceKlass::cast(k);
+    methodOop m_normal   = ik->lookup_method(vmSymbols::setTargetNormal_name(),   vmSymbols::setTarget_signature());
+    methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
+    guarantee(m_normal && m_volatile, "must exist");
+    m_normal->set_not_compilable_quietly();
+    m_volatile->set_not_compilable_quietly();
+  }
}
oop java_lang_invoke_CallSite::target(oop site) {
......
@@ -258,6 +258,9 @@
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
+  template(setTargetNormal_name, "setTargetNormal") \
+  template(setTargetVolatile_name, "setTargetVolatile") \
+  template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
\
......
@@ -394,6 +394,16 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
return true;
}
+//------------------------------check_can_parse--------------------------------
+const char* InlineTree::check_can_parse(ciMethod* callee) {
+  // Certain methods cannot be parsed at all:
+  if ( callee->is_native())                     return "native method";
+  if (!callee->can_be_compiled())               return "not compilable (disabled)";
+  if (!callee->has_balanced_monitors())         return "not compilable (unbalanced monitors)";
+  if ( callee->get_flow_analysis()->failing())  return "not compilable (flow analysis failed)";
+  return NULL;
+}
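`check_can_parse` replaces the boolean `ParseGenerator::can_parse` (removed further down) with a function returning `NULL` on success or a human-readable reason on failure, so one predicate feeds both the inlining decision and the `PrintInlining` message; C1's bailout strings above were aligned to match. A sketch of the pattern with a toy `Method` struct standing in for `ciMethod`:

```cpp
#include <cstdio>

struct Method {  // toy stand-in for ciMethod
  bool is_native, can_be_compiled, has_balanced_monitors, flow_analysis_ok;
};

// NULL means "parseable"; otherwise the string doubles as the log message.
const char* check_can_parse(const Method& m) {
  if ( m.is_native)              return "native method";
  if (!m.can_be_compiled)        return "not compilable (disabled)";
  if (!m.has_balanced_monitors)  return "not compilable (unbalanced monitors)";
  if (!m.flow_analysis_ok)       return "not compilable (flow analysis failed)";
  return NULL;
}

int main() {
  Method m = { false, true, true, false };
  if (const char* msg = check_can_parse(m)) printf("bailout: %s\n", msg);
  return 0;
}
```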
//------------------------------print_inlining---------------------------------
// Really, the failure_msg can be a success message also.
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
@@ -423,14 +433,22 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
int caller_bci = jvms->bci();
ciMethod *caller_method = jvms->method();
-  if( !pass_initial_checks(caller_method, caller_bci, callee_method)) {
-    if( PrintInlining ) {
+  // Do some initial checks.
+  if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
+    if (PrintInlining) {
      failure_msg = "failed_initial_checks";
-      print_inlining( callee_method, caller_bci, failure_msg);
+      print_inlining(callee_method, caller_bci, failure_msg);
}
return NULL;
}
+  // Do some parse checks.
+  failure_msg = check_can_parse(callee_method);
+  if (failure_msg != NULL) {
+    if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg);
+    return NULL;
+  }
+
+  // Check if inlining policy says no.
WarmCallInfo wci = *(initial_wci);
failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
@@ -471,7 +489,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
if (failure_msg == NULL) failure_msg = "inline (hot)";
// Inline!
-  if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+  if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg);
if (UseOldInlining)
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
if (InlineWarmCalls && !wci.is_hot())
@@ -481,7 +499,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
// Do not inline
if (failure_msg == NULL) failure_msg = "too cold to inline";
-  if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+  if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg);
return NULL;
}
......
@@ -61,12 +61,9 @@ public:
{
_is_osr = is_osr;
_expected_uses = expected_uses;
-    assert(can_parse(method, is_osr), "parse must be possible");
+    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
}
-  // Can we build either an OSR or a regular parser for this method?
-  static bool can_parse(ciMethod* method, int is_osr = false);
virtual bool is_parse() const { return true; }
virtual JVMState* generate(JVMState* jvms);
int is_osr() { return _is_osr; }
@@ -303,20 +300,8 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
return kit.transfer_exceptions_into_jvms();
}
-bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
-  // Certain methods cannot be parsed at all:
-  if (!m->can_be_compiled())              return false;
-  if (!m->has_balanced_monitors())        return false;
-  if (m->get_flow_analysis()->failing())  return false;
-  // (Methods may bail out for other reasons, after the parser is run.
-  // We try to avoid this, but if forced, we must return (Node*)NULL.
-  // The user of the CallGenerator must check for this condition.)
-  return true;
-}
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
-  if (!ParseGenerator::can_parse(m)) return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
return new ParseGenerator(m, expected_uses);
}
@@ -324,7 +309,7 @@ CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
-  if (!ParseGenerator::can_parse(m, true)) return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
float past_uses = m->interpreter_invocation_count();
float expected_uses = past_uses;
return new ParseGenerator(m, expected_uses, true);
......
@@ -78,6 +78,8 @@ protected:
int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
public:
+  static const char* check_can_parse(ciMethod* callee);
static InlineTree* build_inline_tree_root();
static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
......