Commit c613356d authored by: kvn

Merge

......@@ -98,10 +98,20 @@ BasicObjectLock* frame::interpreter_frame_monitor_end() const {
#endif // CC_INTERP
void frame::patch_pc(Thread* thread, address pc) {
-  // We borrow this call to set the thread pointer in the interpreter
-  // state; the hook to set up deoptimized frames isn't supplied it.
-  assert(pc == NULL, "should be");
-  get_interpreterState()->set_thread((JavaThread *) thread);
+  if (pc != NULL) {
+    _cb = CodeCache::find_blob(pc);
+    SharkFrame* sharkframe = zeroframe()->as_shark_frame();
+    sharkframe->set_pc(pc);
+    _pc = pc;
+    _deopt_state = is_deoptimized;
+  } else {
+    // We borrow this call to set the thread pointer in the interpreter
+    // state; the hook to set up deoptimized frames isn't supplied it.
+    assert(pc == NULL, "should be");
+    get_interpreterState()->set_thread((JavaThread *) thread);
+  }
}
bool frame::safe_for_sender(JavaThread *thread) {
......
......@@ -45,27 +45,36 @@ inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   case ZeroFrame::ENTRY_FRAME:
     _pc = StubRoutines::call_stub_return_pc();
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
   case ZeroFrame::INTERPRETER_FRAME:
     _pc = NULL;
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
-  case ZeroFrame::SHARK_FRAME:
+  case ZeroFrame::SHARK_FRAME: {
     _pc = zero_sharkframe()->pc();
     _cb = CodeCache::find_blob_unsafe(pc());
+    address original_pc = nmethod::get_deopt_original_pc(this);
+    if (original_pc != NULL) {
+      _pc = original_pc;
+      _deopt_state = is_deoptimized;
+    } else {
+      _deopt_state = not_deoptimized;
+    }
     break;
+  }
   case ZeroFrame::FAKE_STUB_FRAME:
     _pc = NULL;
     _cb = NULL;
+    _deopt_state = not_deoptimized;
     break;
   default:
     ShouldNotReachHere();
   }
-  _deopt_state = not_deoptimized;
}
// Accessors
......
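Taken together, the patch_pc hunk above and this constructor hunk are the two halves of one mechanism: deoptimization rewrites a Shark frame's pc to point at the deopt handler, and any later stack walk recognizes that pc and recovers the original one via nmethod::get_deopt_original_pc. A minimal standalone sketch of that round trip, using invented names (FrameSketch, patch_for_deopt), not the real frame/nmethod layout:

#include <cassert>
#include <cstddef>

typedef unsigned char* address;

// Toy model: deoptimization patches the frame's pc to the deopt handler while
// keeping the original pc recoverable; a later stack walk recognizes the
// handler pc and restores the original.
struct FrameSketch {
  address pc;
  address saved_original_pc;

  void patch_for_deopt(address deopt_handler) {
    saved_original_pc = pc;   // what the patch_pc/set_pc bookkeeping achieves
    pc = deopt_handler;
  }

  // The shape of nmethod::get_deopt_original_pc: non-NULL only for frames
  // whose pc points at the deoptimization handler.
  address get_deopt_original_pc(address deopt_handler) const {
    return (pc == deopt_handler) ? saved_original_pc : NULL;
  }
};

int main() {
  unsigned char code[2];                // [0] = normal pc, [1] = deopt handler
  FrameSketch f = { &code[0], NULL };
  assert(f.get_deopt_original_pc(&code[1]) == NULL);  // not yet deoptimized
  f.patch_for_deopt(&code[1]);
  assert(f.get_deopt_original_pc(&code[1]) == &code[0]);
  return 0;
}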
......@@ -68,6 +68,10 @@ class SharkFrame : public ZeroFrame {
return (address) value_of_word(pc_off);
}
+  void set_pc(address pc) const {
+    *((address*) addr_of_word(pc_off)) = pc;
+  }
intptr_t* unextended_sp() const {
return (intptr_t *) value_of_word(unextended_sp_off);
}
......
......@@ -3223,7 +3223,12 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
}
if (try_inline_full(callee, holder_known, bc, receiver))
return true;
-  print_inlining(callee, _inline_bailout_msg, /*success*/ false);
+  // Entire compilation could fail during try_inline_full call.
+  // In that case printing inlining decision info is useless.
+  if (!bailed_out())
+    print_inlining(callee, _inline_bailout_msg, /*success*/ false);
return false;
}
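For context, CHECK_BAILOUT_ (replaced by explicit checks in the hunks below) is, as far as I can tell, a local convenience macro in c1_GraphBuilder.cpp expanding to roughly { if (bailed_out()) return res; }, so these rewrites are behavior-preserving. The pattern all of these hunks enforce is a sticky failure flag that every later phase must consult; a minimal sketch with an invented BuilderSketch class, not the C1 classes:

#include <cstdio>

// Sticky-bailout sketch: once any phase records a failure, every later step
// must check the flag and unwind without doing further work, which is what
// the added guards do.
struct BuilderSketch {
  const char* bailout_msg;
  BuilderSketch() : bailout_msg(0) {}
  bool bailed_out() const { return bailout_msg != 0; }
  void bailout(const char* why) { if (!bailed_out()) bailout_msg = why; }

  bool try_inline_full() {
    bailout("inlining too deep");  // some phase fails the whole compilation
    return false;
  }

  bool try_inline() {
    if (try_inline_full()) return true;
    // Entire compilation could fail inside try_inline_full; printing the
    // inlining decision would then touch half-built state, so guard it.
    if (!bailed_out())
      std::printf("inlining of callee failed: benign reason\n");
    return false;
  }
};

int main() {
  BuilderSketch b;
  b.try_inline();
  std::printf("compilation bailed out: %s\n", b.bailout_msg);
  return 0;
}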
......@@ -3753,7 +3758,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
push_scope(callee, cont);
// the BlockListBuilder for the callee could have bailed out
-  CHECK_BAILOUT_(false);
+  if (bailed_out())
+    return false;
// Temporarily set up bytecode stream so we can append instructions
// (only using the bci of this stream)
......@@ -3819,7 +3825,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
iterate_all_blocks(callee_start_block == NULL);
// If we bailed out during parsing, return immediately (this is bad news)
-  if (bailed_out()) return false;
+  if (bailed_out())
+    return false;
// iterate_all_blocks theoretically traverses in random order; in
// practice, we have only traversed the continuation if we are
......@@ -3828,9 +3835,6 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
!continuation()->is_set(BlockBegin::was_visited_flag),
"continuation should not have been parsed yet if we created it");
-  // If we bailed out during parsing, return immediately (this is bad news)
-  CHECK_BAILOUT_(false);
// At this point we are almost ready to return and resume parsing of
// the caller back in the GraphBuilder. The only thing we want to do
// first is an optimization: during parsing of the callee we
......@@ -4171,7 +4175,10 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
else
log->inline_success("receiver is statically known");
} else {
-    log->inline_fail(msg);
+    if (msg != NULL)
+      log->inline_fail(msg);
+    else
+      log->inline_fail("reason unknown");
}
}
......
......@@ -59,6 +59,19 @@ bool ciType::is_subtype_of(ciType* type) {
return false;
}
+// ------------------------------------------------------------------
+// ciType::name
+//
+// Return the name of this type
+const char* ciType::name() {
+  if (is_primitive_type()) {
+    return type2name(basic_type());
+  } else {
+    assert(is_klass(), "must be");
+    return as_klass()->name()->as_utf8();
+  }
+}
// ------------------------------------------------------------------
// ciType::print_impl
//
......@@ -73,7 +86,8 @@ void ciType::print_impl(outputStream* st) {
//
// Print the name of this type
void ciType::print_name_on(outputStream* st) {
-  st->print(type2name(basic_type()));
+  ResourceMark rm;
+  st->print(name());
}
......
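The practical effect of routing print_name_on through the new name(): type2name(basic_type()) can only produce primitive names, since every reference type's basic type collapses to T_OBJECT, whereas name() yields the actual class name. A toy rendering in which TypeSketch and type2name_sketch are invented; only the dispatch shape mirrors the patch:

#include <cstdio>

enum BasicTypeSketch { T_INT, T_OBJECT };

// Stand-in for type2name: only knows basic types.
static const char* type2name_sketch(BasicTypeSketch bt) {
  return bt == T_INT ? "int" : "object";
}

struct TypeSketch {
  BasicTypeSketch bt;
  const char* klass_name;   // non-NULL for reference types
  // The new name(): primitives keep type2name, reference types get their
  // real class name.
  const char* name() const {
    return klass_name ? klass_name : type2name_sketch(bt);
  }
};

int main() {
  TypeSketch i = { T_INT, 0 };
  TypeSketch s = { T_OBJECT, "java/lang/String" };
  std::printf("old: %s / %s\n", type2name_sketch(i.bt), type2name_sketch(s.bt));
  std::printf("new: %s / %s\n", i.name(), s.name());
  return 0;
}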
......@@ -77,6 +77,7 @@ public:
bool is_type() const { return true; }
bool is_classless() const { return is_primitive_type(); }
+  const char* name();
virtual void print_name_on(outputStream* st);
void print_name() {
print_name_on(tty);
......
......@@ -50,6 +50,7 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
// Missing feature tests
virtual bool supports_native() { return true; }
virtual bool supports_osr () { return true; }
+  virtual bool can_compile_method(methodHandle method) { return true; }
#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
virtual bool is_c1 () { return false; }
virtual bool is_c2 () { return false; }
......
......@@ -1218,7 +1218,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// lock, make sure that the compilation
// isn't prohibited in a straightforward way.
-  if (compiler(comp_level) == NULL || compilation_is_prohibited(method, osr_bci, comp_level)) {
+  if (compiler(comp_level) == NULL || !compiler(comp_level)->can_compile_method(method) || compilation_is_prohibited(method, osr_bci, comp_level)) {
return NULL;
}
......
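These two hunks, plus the SharkCompiler override further down in this commit, form one pattern: a default-true virtual veto on AbstractCompiler that the broker consults before creating a compile task. A minimal standalone rendering (the scaffolding below is invented; only the can_compile_method idea comes from the patch):

#include <cstddef>
#include <cstdio>

struct CompilerSketch {
  virtual ~CompilerSketch() {}
  // Default: a back end can compile anything (mirrors AbstractCompiler).
  virtual bool can_compile_method(bool is_mh_intrinsic) { return true; }
};

struct SharkLikeCompiler : CompilerSketch {
  // A Shark-like back end opts out of method handle intrinsics.
  virtual bool can_compile_method(bool is_mh_intrinsic) { return !is_mh_intrinsic; }
};

// Stand-in for the broker check added above: refuse before queueing a task.
static bool should_queue(CompilerSketch* c, bool is_mh_intrinsic) {
  return c != NULL && c->can_compile_method(is_mh_intrinsic);
}

int main() {
  SharkLikeCompiler shark;
  std::printf("plain method: %d, MH intrinsic: %d\n",
              should_queue(&shark, false), should_queue(&shark, true));
  return 0;
}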
......@@ -692,7 +692,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
-  if (PrintInlining) {
+  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
......@@ -2049,7 +2049,7 @@ void Compile::Optimize() {
} // (End scope of igvn; run destructor if necessary for asserts.)
-  dump_inlining();
+  dump_inlining();
// A method with only infinite loops has no edges entering loops from root
{
NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
......@@ -3497,7 +3497,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
}
void Compile::dump_inlining() {
-  if (PrintInlining) {
+  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
......
......@@ -553,7 +553,13 @@ void Parse::do_call() {
rtype = ctype;
}
} else {
-    assert(rtype == ctype, "mismatched return types"); // symbolic resolution enforces this
+    // Symbolic resolution enforces the types to be the same.
+    // NOTE: We must relax the assert for unloaded types because two
+    // different ciType instances of the same unloaded class type
+    // can appear to be "loaded" by different loaders (depending on
+    // the accessing class).
+    assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
+           err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
}
// If the return type of the method is not loaded, assert that the
......
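The relaxed assert has a testable shape: pointer identity is meaningful only when both sides are fully resolved, because an unloaded class is represented by a per-accessing-context placeholder rather than a canonical instance. A toy model of that shape (TypeSketch and the class name are invented, not the real ci hierarchy):

#include <cassert>

// Toy model: a "loaded" type is canonical, an unloaded one is a placeholder
// minted per accessing context, so identity only holds between loaded types.
struct TypeSketch { const char* name; bool loaded; };

int main() {
  // Two placeholders for the same unloaded class, seen from two accessors:
  TypeSketch rtype = { "com/example/Unloaded", false };
  TypeSketch ctype = { "com/example/Unloaded", false };
  // The relaxed assert's shape: require identity only when both are loaded.
  assert(!rtype.loaded || !ctype.loaded || &rtype == &ctype);
  return 0;
}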
......@@ -3559,7 +3559,6 @@ bool LibraryCallKit::inline_native_getLength() {
// public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
-  return false;
if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
// Get the arguments.
......
......@@ -1032,7 +1032,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
check_null(value);
object = value->generic_value();
}
-  if (is_get && field->is_constant()) {
+  if (is_get && field->is_constant() && field->is_static()) {
SharkConstant *constant = SharkConstant::for_field(iter());
if (constant->is_loaded())
value = constant->value(builder());
......@@ -1044,10 +1044,17 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
BasicType basic_type = field->type()->basic_type();
Type *stack_type = SharkType::to_stackType(basic_type);
Type *field_type = SharkType::to_arrayType(basic_type);
+  Type *type = field_type;
+  if (field->is_volatile()) {
+    if (field_type == SharkType::jfloat_type()) {
+      type = SharkType::jint_type();
+    } else if (field_type == SharkType::jdouble_type()) {
+      type = SharkType::jlong_type();
+    }
+  }
   Value *addr = builder()->CreateAddressOfStructEntry(
     object, in_ByteSize(field->offset_in_bytes()),
-    PointerType::getUnqual(field_type),
+    PointerType::getUnqual(type),
     "addr");
// Do the access
......@@ -1055,6 +1062,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
Value* field_value;
if (field->is_volatile()) {
field_value = builder()->CreateAtomicLoad(addr);
+    field_value = builder()->CreateBitCast(field_value, field_type);
} else {
field_value = builder()->CreateLoad(addr);
}
......@@ -1074,6 +1082,7 @@ void SharkBlock::do_field_access(bool is_get, bool is_field) {
}
if (field->is_volatile()) {
+    field_value = builder()->CreateBitCast(field_value, type);
builder()->CreateAtomicStore(field_value, addr);
} else {
builder()->CreateStore(field_value, addr);
......
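The three hunks above work around the fact that the atomic load/store primitives Shark emits operate on integer types: a volatile jfloat is accessed as a jint and a volatile jdouble as a jlong, with bitcasts at the edges to recover the floating-point value. The same idea in portable C++ (a sketch, not Shark's LLVM-API code; std::memcpy plays the role of CreateBitCast):

#include <atomic>
#include <cstdint>
#include <cstring>
#include <cstdio>

// The volatile float's storage is an atomic 32-bit integer.
static std::atomic<uint32_t> volatile_float_bits(0);

float load_volatile_float() {
  uint32_t bits = volatile_float_bits.load(std::memory_order_seq_cst);
  float f;
  std::memcpy(&f, &bits, sizeof f);     // the "CreateBitCast" after the load
  return f;
}

void store_volatile_float(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);  // bitcast before the atomic store
  volatile_float_bits.store(bits, std::memory_order_seq_cst);
}

int main() {
  store_volatile_float(3.5f);
  std::printf("%f\n", load_volatile_float());
  return 0;
}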
......@@ -185,6 +185,9 @@ void SharkCompiler::compile_method(ciEnv* env,
// Build the LLVM IR for the method
Function *function = SharkFunction::build(env, &builder, flow, name);
+  if (env->failing()) {
+    return;
+  }
// Generate native code. It's unpleasant that we have to drop into
// the VM to do this -- it blocks safepoints -- but I can't see any
......
......@@ -46,6 +46,9 @@ class SharkCompiler : public AbstractCompiler {
// Missing feature tests
bool supports_native() { return true; }
bool supports_osr() { return true; }
+  bool can_compile_method(methodHandle method) {
+    return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
+  }
// Customization
bool needs_adapters() { return false; }
......
......@@ -37,7 +37,12 @@ SharkConstant* SharkConstant::for_ldc(ciBytecodeStream *iter) {
ciType *type = NULL;
if (constant.basic_type() == T_OBJECT) {
ciEnv *env = ciEnv::current();
-    assert(constant.as_object()->klass() == env->String_klass() || constant.as_object()->klass() == env->Class_klass(), "should be");
+    assert(constant.as_object()->klass() == env->String_klass()
+           || constant.as_object()->klass() == env->Class_klass()
+           || constant.as_object()->klass()->is_subtype_of(env->MethodType_klass())
+           || constant.as_object()->klass()->is_subtype_of(env->MethodHandle_klass()), "should be");
type = constant.as_object()->klass();
}
return new SharkConstant(constant, type);
......
......@@ -77,6 +77,10 @@ void SharkFunction::initialize(const char *name) {
// Walk the tree from the start block to determine which
// blocks are entered and which blocks require phis
SharkTopLevelBlock *start_block = block(flow()->start_block_num());
+  if (is_osr() && start_block->stack_depth_at_entry() != 0) {
+    env()->record_method_not_compilable("can't compile OSR block with incoming stack-depth > 0");
+    return;
+  }
assert(start_block->start() == flow()->start_bci(), "blocks out of order");
start_block->enter();
......
......@@ -725,7 +725,7 @@ bool SharkInlinerHelper::do_field_access(bool is_get, bool is_field) {
// Push the result if necessary
if (is_get) {
bool result_pushed = false;
-    if (field->is_constant()) {
+    if (field->is_constant() && field->is_static()) {
SharkConstant *sc = SharkConstant::for_field(iter());
if (sc->is_loaded()) {
push(sc->is_nonzero());
......
......@@ -68,7 +68,7 @@ class SharkCompileInvariants : public ResourceObj {
//
// Accessing this directly is kind of ugly, so it's private. Add
// new accessors below if you need something from it.
- private:
+ protected:
ciEnv* env() const {
assert(_env != NULL, "env not available");
return _env;
......@@ -99,13 +99,15 @@ class SharkCompileInvariants : public ResourceObj {
DebugInformationRecorder* debug_info() const {
return env()->debug_info();
}
-  Dependencies* dependencies() const {
-    return env()->dependencies();
-  }
   SharkCodeBuffer* code_buffer() const {
     return builder()->code_buffer();
   }
+ public:
+  Dependencies* dependencies() const {
+    return env()->dependencies();
+  }
// Commonly used classes
+ protected:
ciInstanceKlass* java_lang_Object_klass() const {
......
......@@ -113,7 +113,19 @@ void SharkTopLevelBlock::scan_for_traps() {
ciSignature* sig;
method = iter()->get_method(will_link, &sig);
assert(will_link, "typeflow responsibility");
+      // We can't compile calls to method handle intrinsics, because we use
+      // the interpreter entry points and they expect the top frame to be an
+      // interpreter frame. We need to implement the intrinsics for Shark.
+      if (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form()) {
+        if (SharkPerformanceWarnings) {
+          warning("JSR292 optimization not yet implemented in Shark");
+        }
+        set_trap(
+          Deoptimization::make_trap_request(
+            Deoptimization::Reason_unhandled,
+            Deoptimization::Action_make_not_compilable), bci());
+        return;
+      }
if (!method->holder()->is_linked()) {
set_trap(
Deoptimization::make_trap_request(
......@@ -158,6 +170,16 @@ void SharkTopLevelBlock::scan_for_traps() {
return;
}
break;
+    case Bytecodes::_invokedynamic:
+    case Bytecodes::_invokehandle:
+      if (SharkPerformanceWarnings) {
+        warning("JSR292 optimization not yet implemented in Shark");
+      }
+      set_trap(
+        Deoptimization::make_trap_request(
+          Deoptimization::Reason_unhandled,
+          Deoptimization::Action_make_not_compilable), bci());
+      return;
}
}
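Both new traps funnel through Deoptimization::make_trap_request, which packs a deopt reason and a recovery action into a single int for the runtime to decode; Action_make_not_compilable additionally tells the runtime to stop recompiling the method. The sketch below shows only the pack/unpack shape, with invented shifts and enum values rather than HotSpot's actual bit layout:

#include <cassert>

enum ReasonSketch { reason_unhandled = 7 };
enum ActionSketch { action_make_not_compilable = 3 };

// Pack a (reason, action) pair into one int; the real encoding differs.
static int make_trap_request_sketch(int reason, int action) {
  return (reason << 8) | action;        // illustrative shifts only
}
static int trap_request_reason_sketch(int request) { return request >> 8; }
static int trap_request_action_sketch(int request) { return request & 0xff; }

int main() {
  int req = make_trap_request_sketch(reason_unhandled, action_make_not_compilable);
  assert(trap_request_reason_sketch(req) == reason_unhandled);
  assert(trap_request_action_sketch(req) == action_make_not_compilable);
  return 0;
}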
......@@ -1030,7 +1052,6 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
dest_method->holder() == java_lang_Object_klass())
return dest_method;
#ifdef SHARK_CAN_DEOPTIMIZE_ANYWHERE
// This code can replace a virtual call with a direct call if this
// class is the only one in the entire set of loaded classes that
// implements this method. This makes the compiled code dependent
......@@ -1064,6 +1085,8 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
if (monomorphic_target != NULL) {
assert(!monomorphic_target->is_abstract(), "shouldn't be");
+    function()->dependencies()->assert_unique_concrete_method(actual_receiver, monomorphic_target);
// Opto has a bunch of type checking here that I don't
// understand. It's to inhibit casting in one direction,
// possibly because objects in Opto can have inexact
......@@ -1097,7 +1120,6 @@ ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
// with non-monomorphic targets if the receiver has an exact
// type. We don't mark types this way, so we can't do this.
#endif // SHARK_CAN_DEOPTIMIZE_ANYWHERE
return NULL;
}
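For context on the function these hunks touch: improve_virtual_call replaces a virtual call with a direct call when class-hierarchy analysis proves a unique concrete target, and the newly added assert_unique_concrete_method line records that assumption as a dependency (which is why sharkInvariants.hpp makes dependencies() public above), so that loading a second implementor later invalidates the compiled code. A standalone toy of that contract, with invented names throughout:

#include <cstdio>
#include <vector>

// Toy dependency log: assumptions are re-checked whenever new classes load.
struct DependencyLog {
  std::vector<const char*> assumptions;
  void assert_unique_concrete_method(const char* klass, const char* method) {
    (void) klass;
    assumptions.push_back(method);
  }
};

// If exactly one concrete implementation exists, devirtualize and record the
// assumption; otherwise leave the call virtual (return NULL).
static const char* improve_virtual_call_sketch(DependencyLog* deps,
                                               int num_concrete_impls,
                                               const char* only_impl) {
  if (num_concrete_impls != 1)
    return 0;                             // stay virtual
  deps->assert_unique_concrete_method("Receiver", only_impl);
  return only_impl;                       // direct-call target
}

int main() {
  DependencyLog deps;
  const char* t = improve_virtual_call_sketch(&deps, 1, "Impl::run");
  std::printf("devirtualized to: %s (deps recorded: %zu)\n",
              t, deps.assumptions.size());
  return 0;
}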
......@@ -1298,8 +1320,9 @@ void SharkTopLevelBlock::do_call() {
// Try to inline the call
if (!call_is_virtual) {
-    if (SharkInliner::attempt_inline(call_method, current_state()))
+    if (SharkInliner::attempt_inline(call_method, current_state())) {
       return;
+    }
}
// Find the method we are calling
......
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -23,7 +23,16 @@
*/
/*
- * Manual test
+ * @test
+ * @bug 7190310
+ * @summary Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
+ * @run main/othervm/timeout=600 -Xbatch Test7190310
+ */
+
+/*
+ * Note that the bug exhibits as an infinite loop, so the timeout is helpful.
+ * The test should normally finish quickly, but on some especially slow machines
+ * it may not. The companion _unsafe test lacks a timeout, but that is okay.
+ */
import java.lang.ref.*;
......