Commit c613356d authored by K kvn

Merge

@@ -98,10 +98,20 @@ BasicObjectLock* frame::interpreter_frame_monitor_end() const {
 #endif // CC_INTERP
 
 void frame::patch_pc(Thread* thread, address pc) {
-  // We borrow this call to set the thread pointer in the interpreter
-  // state; the hook to set up deoptimized frames isn't supplied it.
-  assert(pc == NULL, "should be");
-  get_interpreterState()->set_thread((JavaThread *) thread);
+  if (pc != NULL) {
+    _cb = CodeCache::find_blob(pc);
+    SharkFrame* sharkframe = zeroframe()->as_shark_frame();
+    sharkframe->set_pc(pc);
+    _pc = pc;
+    _deopt_state = is_deoptimized;
+
+  } else {
+    // We borrow this call to set the thread pointer in the interpreter
+    // state; the hook to set up deoptimized frames isn't supplied it.
+    assert(pc == NULL, "should be");
+    get_interpreterState()->set_thread((JavaThread *) thread);
+  }
 }
 
 bool frame::safe_for_sender(JavaThread *thread) {
......
@@ -45,27 +45,36 @@ inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   case ZeroFrame::ENTRY_FRAME:
     _pc = StubRoutines::call_stub_return_pc();
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
 
   case ZeroFrame::INTERPRETER_FRAME:
     _pc = NULL;
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
 
-  case ZeroFrame::SHARK_FRAME:
+  case ZeroFrame::SHARK_FRAME: {
     _pc = zero_sharkframe()->pc();
     _cb = CodeCache::find_blob_unsafe(pc());
+    address original_pc = nmethod::get_deopt_original_pc(this);
+    if (original_pc != NULL) {
+      _pc = original_pc;
+      _deopt_state = is_deoptimized;
+    } else {
+      _deopt_state = not_deoptimized;
+    }
     break;
-
+  }
   case ZeroFrame::FAKE_STUB_FRAME:
     _pc = NULL;
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
 
   default:
     ShouldNotReachHere();
   }
-  _deopt_state = not_deoptimized;
 }
 
 // Accessors
......
@@ -68,6 +68,10 @@ class SharkFrame : public ZeroFrame {
     return (address) value_of_word(pc_off);
   }
 
+  void set_pc(address pc) const {
+    *((address*) addr_of_word(pc_off)) = pc;
+  }
+
   intptr_t* unextended_sp() const {
     return (intptr_t *) value_of_word(unextended_sp_off);
   }
......
@@ -3223,7 +3223,12 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
   }
 
   if (try_inline_full(callee, holder_known, bc, receiver))
     return true;
-  print_inlining(callee, _inline_bailout_msg, /*success*/ false);
+
+  // Entire compilation could fail during try_inline_full call.
+  // In that case printing inlining decision info is useless.
+  if (!bailed_out())
+    print_inlining(callee, _inline_bailout_msg, /*success*/ false);
+
   return false;
 }
@@ -3753,7 +3758,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
   push_scope(callee, cont);
 
   // the BlockListBuilder for the callee could have bailed out
-  CHECK_BAILOUT_(false);
+  if (bailed_out())
+      return false;
 
   // Temporarily set up bytecode stream so we can append instructions
   // (only using the bci of this stream)
@@ -3819,7 +3825,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
   iterate_all_blocks(callee_start_block == NULL);
 
   // If we bailed out during parsing, return immediately (this is bad news)
-  if (bailed_out()) return false;
+  if (bailed_out())
+      return false;
 
   // iterate_all_blocks theoretically traverses in random order; in
   // practice, we have only traversed the continuation if we are
@@ -3828,9 +3835,6 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
          !continuation()->is_set(BlockBegin::was_visited_flag),
          "continuation should not have been parsed yet if we created it");
 
-  // If we bailed out during parsing, return immediately (this is bad news)
-  CHECK_BAILOUT_(false);
-
   // At this point we are almost ready to return and resume parsing of
   // the caller back in the GraphBuilder. The only thing we want to do
   // first is an optimization: during parsing of the callee we
@@ -4171,7 +4175,10 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
     else
       log->inline_success("receiver is statically known");
   } else {
-    log->inline_fail(msg);
+    if (msg != NULL)
+      log->inline_fail(msg);
+    else
+      log->inline_fail("reason unknown");
   }
 }
......
@@ -59,6 +59,19 @@ bool ciType::is_subtype_of(ciType* type) {
   return false;
 }
 
+// ------------------------------------------------------------------
+// ciType::name
+//
+// Return the name of this type
+const char* ciType::name() {
+  if (is_primitive_type()) {
+    return type2name(basic_type());
+  } else {
+    assert(is_klass(), "must be");
+    return as_klass()->name()->as_utf8();
+  }
+}
+
 // ------------------------------------------------------------------
 // ciType::print_impl
 //
@@ -73,7 +86,8 @@ void ciType::print_impl(outputStream* st) {
 //
 // Print the name of this type
 void ciType::print_name_on(outputStream* st) {
-  st->print(type2name(basic_type()));
+  ResourceMark rm;
+  st->print(name());
 }
......
@@ -77,6 +77,7 @@ public:
   bool is_type() const                      { return true; }
   bool is_classless() const                 { return is_primitive_type(); }
 
+  const char* name();
   virtual void print_name_on(outputStream* st);
   void print_name() {
     print_name_on(tty);
......
@@ -50,6 +50,7 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
+  virtual bool can_compile_method(methodHandle method)  { return true; }
 #if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
   virtual bool is_c1   ()                        { return false; }
   virtual bool is_c2   ()                        { return false; }
......
@@ -1218,7 +1218,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
   // lock, make sure that the compilation
   // isn't prohibited in a straightforward way.
 
-  if (compiler(comp_level) == NULL || compilation_is_prohibited(method, osr_bci, comp_level)) {
+  if (compiler(comp_level) == NULL || !compiler(comp_level)->can_compile_method(method) || compilation_is_prohibited(method, osr_bci, comp_level)) {
     return NULL;
   }
......
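The broker-side check above relies on the can_compile_method hook added to AbstractCompiler earlier in this diff: before a compile task is created, the broker asks the selected compiler whether it is willing to take the method at all. A minimal stand-alone sketch of that pattern (the types and names below are illustrative only, not the real HotSpot classes):

#include <cstdio>

// Hypothetical stand-ins for the real VM types; names are made up for illustration.
struct Method {
  bool is_method_handle_intrinsic;
  bool is_compiled_lambda_form;
};

// Default hook: a compiler places no extra restriction on which methods it accepts.
struct AbstractCompilerSketch {
  virtual bool can_compile_method(const Method&) { return true; }
  virtual ~AbstractCompilerSketch() {}
};

// A Shark-like compiler opts out of JSR 292 method handle intrinsics and lambda forms.
struct SharkLikeCompiler : AbstractCompilerSketch {
  bool can_compile_method(const Method& m) override {
    return !(m.is_method_handle_intrinsic || m.is_compiled_lambda_form);
  }
};

// Mirrors the broker check: bail out early if the compiler refuses the method.
bool may_schedule(AbstractCompilerSketch* c, const Method& m) {
  return c != nullptr && c->can_compile_method(m);
}

int main() {
  SharkLikeCompiler shark;
  Method plain   = { false, false };
  Method mh_form = { false, true  };
  std::printf("plain: %d, lambda form: %d\n",
              may_schedule(&shark, plain), may_schedule(&shark, mh_form));
  return 0;
}

With this shape, one compiler can refuse a class of methods while the others keep the permissive default, and the broker needs only the single virtual call shown in the hunk above.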
@@ -692,7 +692,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (PrintInlining) {
+  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -2049,7 +2049,7 @@ void Compile::Optimize() {
   } // (End scope of igvn; run destructor if necessary for asserts.)
 
-  dump_inlining();
+  // A method with only infinite loops has no edges entering loops from root
   {
     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
@@ -3497,7 +3497,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
 }
 
 void Compile::dump_inlining() {
-  if (PrintInlining) {
+  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
......
@@ -553,7 +553,13 @@ void Parse::do_call() {
       rtype = ctype;
     }
   } else {
-    assert(rtype == ctype, "mismatched return types");  // symbolic resolution enforces this
+    // Symbolic resolution enforces the types to be the same.
+    // NOTE: We must relax the assert for unloaded types because two
+    // different ciType instances of the same unloaded class type
+    // can appear to be "loaded" by different loaders (depending on
+    // the accessing class).
+    assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
+           err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
   }
 
   // If the return type of the method is not loaded, assert that the
......
@@ -3559,7 +3559,6 @@ bool LibraryCallKit::inline_native_getLength() {
 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
-  return false;
   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
 
   // Get the arguments.
......
@@ -1032,7 +1032,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
     check_null(value);
     object = value->generic_value();
   }
-  if (is_get && field->is_constant()) {
+  if (is_get && field->is_constant() && field->is_static()) {
     SharkConstant *constant = SharkConstant::for_field(iter());
     if (constant->is_loaded())
       value = constant->value(builder());
@@ -1044,10 +1044,17 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
   BasicType basic_type = field->type()->basic_type();
   Type *stack_type = SharkType::to_stackType(basic_type);
   Type *field_type = SharkType::to_arrayType(basic_type);
+  Type *type = field_type;
+  if (field->is_volatile()) {
+    if (field_type == SharkType::jfloat_type()) {
+      type = SharkType::jint_type();
+    } else if (field_type == SharkType::jdouble_type()) {
+      type = SharkType::jlong_type();
+    }
+  }
   Value *addr = builder()->CreateAddressOfStructEntry(
     object, in_ByteSize(field->offset_in_bytes()),
-    PointerType::getUnqual(field_type),
+    PointerType::getUnqual(type),
     "addr");
 
   // Do the access
@@ -1055,6 +1062,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
     Value* field_value;
     if (field->is_volatile()) {
       field_value = builder()->CreateAtomicLoad(addr);
+      field_value = builder()->CreateBitCast(field_value, field_type);
     } else {
       field_value = builder()->CreateLoad(addr);
     }
@@ -1074,6 +1082,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
     }
 
     if (field->is_volatile()) {
+      field_value = builder()->CreateBitCast(field_value, type);
       builder()->CreateAtomicStore(field_value, addr);
     } else {
       builder()->CreateStore(field_value, addr);
......
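The volatile handling above widens a float field to a 32-bit integer and a double field to a 64-bit integer around the atomic load or store, then bitcasts between the integer and the floating-point value, since the atomic access is generated on an integer-width type. A rough stand-alone C++ analogy of the same bit-reinterpretation idea (a sketch only, not Shark or LLVM API code; the field is a made-up example):

#include <atomic>
#include <cstdint>
#include <cstring>
#include <cstdio>

// An atomic 64-bit integer stands in for the storage of a volatile double field.
static std::atomic<uint64_t> field_bits(0);

double load_volatile_double() {
  uint64_t bits = field_bits.load(std::memory_order_seq_cst); // atomic load as a 64-bit integer
  double value;
  std::memcpy(&value, &bits, sizeof value);                   // reinterpret the bits as a double
  return value;
}

void store_volatile_double(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);                    // reinterpret the double as integer bits
  field_bits.store(bits, std::memory_order_seq_cst);          // atomic store of the integer bits
}

int main() {
  store_volatile_double(3.5);
  std::printf("%f\n", load_volatile_double());
  return 0;
}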
@@ -185,6 +185,9 @@ void SharkCompiler::compile_method(ciEnv* env,
 
   // Build the LLVM IR for the method
   Function *function = SharkFunction::build(env, &builder, flow, name);
+  if (env->failing()) {
+    return;
+  }
 
   // Generate native code.  It's unpleasant that we have to drop into
   // the VM to do this -- it blocks safepoints -- but I can't see any
......
@@ -46,6 +46,9 @@ class SharkCompiler : public AbstractCompiler {
   // Missing feature tests
   bool supports_native() { return true; }
   bool supports_osr()    { return true; }
+  bool can_compile_method(methodHandle method) {
+    return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
+  }
 
   // Customization
   bool needs_adapters()  { return false; }
......
@@ -37,7 +37,12 @@ SharkConstant* SharkConstant::for_ldc(ciBytecodeStream *iter) {
   ciType *type = NULL;
   if (constant.basic_type() == T_OBJECT) {
     ciEnv *env = ciEnv::current();
-    assert(constant.as_object()->klass() == env->String_klass() || constant.as_object()->klass() == env->Class_klass(), "should be");
+
+    assert(constant.as_object()->klass() == env->String_klass()
+           || constant.as_object()->klass() == env->Class_klass()
+           || constant.as_object()->klass()->is_subtype_of(env->MethodType_klass())
+           || constant.as_object()->klass()->is_subtype_of(env->MethodHandle_klass()), "should be");
+
     type = constant.as_object()->klass();
   }
   return new SharkConstant(constant, type);
......
@@ -77,6 +77,10 @@ void SharkFunction::initialize(const char *name) {
   // Walk the tree from the start block to determine which
   // blocks are entered and which blocks require phis
   SharkTopLevelBlock *start_block = block(flow()->start_block_num());
+  if (is_osr() && start_block->stack_depth_at_entry() != 0) {
+    env()->record_method_not_compilable("can't compile OSR block with incoming stack-depth > 0");
+    return;
+  }
   assert(start_block->start() == flow()->start_bci(), "blocks out of order");
   start_block->enter();
......
@@ -725,7 +725,7 @@ bool SharkInlinerHelper::do_field_access(bool is_get, bool is_field) {
   // Push the result if necessary
   if (is_get) {
     bool result_pushed = false;
-    if (field->is_constant()) {
+    if (field->is_constant() && field->is_static()) {
       SharkConstant *sc = SharkConstant::for_field(iter());
       if (sc->is_loaded()) {
         push(sc->is_nonzero());
......
@@ -68,7 +68,7 @@ class SharkCompileInvariants : public ResourceObj {
   //
   // Accessing this directly is kind of ugly, so it's private.  Add
   // new accessors below if you need something from it.
- private:
+ protected:
   ciEnv* env() const {
     assert(_env != NULL, "env not available");
     return _env;
@@ -99,13 +99,15 @@ class SharkCompileInvariants : public ResourceObj {
   DebugInformationRecorder* debug_info() const {
     return env()->debug_info();
   }
-  Dependencies* dependencies() const {
-    return env()->dependencies();
-  }
   SharkCodeBuffer* code_buffer() const {
     return builder()->code_buffer();
   }
 
+ public:
+  Dependencies* dependencies() const {
+    return env()->dependencies();
+  }
+
   // Commonly used classes
  protected:
   ciInstanceKlass* java_lang_Object_klass() const {
......
@@ -113,7 +113,19 @@ void SharkTopLevelBlock::scan_for_traps() {
       ciSignature* sig;
       method = iter()->get_method(will_link, &sig);
       assert(will_link, "typeflow responsibility");
-
+      // We can't compile calls to method handle intrinsics, because we use
+      // the interpreter entry points and they expect the top frame to be an
+      // interpreter frame. We need to implement the intrinsics for Shark.
+      if (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form()) {
+        if (SharkPerformanceWarnings) {
+          warning("JSR292 optimization not yet implemented in Shark");
+        }
+        set_trap(
+          Deoptimization::make_trap_request(
+            Deoptimization::Reason_unhandled,
+            Deoptimization::Action_make_not_compilable), bci());
+        return;
+      }
       if (!method->holder()->is_linked()) {
         set_trap(
           Deoptimization::make_trap_request(
@@ -158,6 +170,16 @@ void SharkTopLevelBlock::scan_for_traps() {
         return;
       }
       break;
+
+    case Bytecodes::_invokedynamic:
+    case Bytecodes::_invokehandle:
+      if (SharkPerformanceWarnings) {
+        warning("JSR292 optimization not yet implemented in Shark");
+      }
+      set_trap(
+        Deoptimization::make_trap_request(
+          Deoptimization::Reason_unhandled,
+          Deoptimization::Action_make_not_compilable), bci());
+      return;
     }
   }
@@ -1030,7 +1052,6 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
       dest_method->holder() == java_lang_Object_klass())
     return dest_method;
 
-#ifdef SHARK_CAN_DEOPTIMIZE_ANYWHERE
   // This code can replace a virtual call with a direct call if this
   // class is the only one in the entire set of loaded classes that
   // implements this method.  This makes the compiled code dependent
@@ -1064,6 +1085,8 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
   if (monomorphic_target != NULL) {
     assert(!monomorphic_target->is_abstract(), "shouldn't be");
 
+    function()->dependencies()->assert_unique_concrete_method(actual_receiver, monomorphic_target);
+
     // Opto has a bunch of type checking here that I don't
     // understand.  It's to inhibit casting in one direction,
     // possibly because objects in Opto can have inexact
@@ -1097,7 +1120,6 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
   // with non-monomorphic targets if the receiver has an exact
   // type.  We don't mark types this way, so we can't do this.
 
-#endif // SHARK_CAN_DEOPTIMIZE_ANYWHERE
   return NULL;
 }
@@ -1298,8 +1320,9 @@ void SharkTopLevelBlock::do_call() {
 
   // Try to inline the call
   if (!call_is_virtual) {
-    if (SharkInliner::attempt_inline(call_method, current_state()))
+    if (SharkInliner::attempt_inline(call_method, current_state())) {
       return;
+    }
   }
 
   // Find the method we are calling
......
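Several of the Shark hunks above bail out by installing a trap: Deoptimization::make_trap_request packs a deoptimization reason and an action into a single request value, which set_trap records for the given bci so the method falls back to the interpreter. A toy sketch of that packing idea follows (the field layout and enum values here are invented for illustration and do not match HotSpot's real encoding):

#include <cstdio>

// Hypothetical packing: action in the high byte, reason in the low byte.
enum Reason { reason_unhandled = 1 };
enum Action { action_make_not_compilable = 2 };

inline int make_trap_request(Reason r, Action a) {
  return (a << 8) | r;
}

inline Reason trap_request_reason(int req) { return static_cast<Reason>(req & 0xff); }
inline Action trap_request_action(int req) { return static_cast<Action>(req >> 8); }

int main() {
  int req = make_trap_request(reason_unhandled, action_make_not_compilable);
  std::printf("reason=%d action=%d\n", trap_request_reason(req), trap_request_action(req));
  return 0;
}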
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,16 @@
  */
 
 /*
- * Manual test
+ * @test
+ * @bug 7190310
+ * @summary Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
+ * @run main/othervm/timeout=600 -Xbatch Test7190310
+ */
+
+/*
+ * Note bug exhibits as infinite loop, timeout is helpful.
+ * It should normally finish pretty quickly, but on some especially slow machines
+ * it may not. The companion _unsafe test lacks a timeout, but that is okay.
  */
 
 import java.lang.ref.*;
......