Commit ddc27095 authored by twisti

7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely

Reviewed-by: never, bdelsart
Parent a1b67d19
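
In outline, the fix stops C1's DeoptimizeStub from jumping straight to SharedRuntime::deopt_blob()->unpack_with_reexecution(). The stub now calls a new Runtime1 entry, Runtime1::entry_for(Runtime1::deoptimize_id); the generated stub calls the new C++ runtime entry Runtime1::deoptimize, which deoptimizes the compiled caller frame via Deoptimization::deoptimize_frame, and only then tail-jumps to the deopt blob's unpack_with_reexecution entry. That way the caller is genuinely deoptimized and the unlinked invokedynamic is reexecuted in the interpreter, instead of re-entering the same compiled code and hitting the stub again (the infinite deoptimization of the bug title). A condensed view of the new path, assembled from the hunks below (an illustrative outline only, not a standalone translation unit; it presumes HotSpot's headers and the JRT_ENTRY/JRT_END machinery):

    // 1) The C1 DeoptimizeStub (x86 form shown) calls the new Runtime1 stub
    //    instead of the deopt blob:
    //      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
    //
    // 2) The generated deoptimize_id stub saves live registers, calls the C++
    //    entry below, restores registers, and tail-jumps to
    //    SharedRuntime::deopt_blob()->unpack_with_reexecution().
    //
    // 3) The C++ entry marks the compiled caller frame for deoptimization:
    JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
      RegisterMap reg_map(thread, false);
      frame stub_frame   = thread->last_frame();          // the deoptimize_id stub frame
      frame caller_frame = stub_frame.sender(&reg_map);   // the compiled caller
      Deoptimization::deoptimize_frame(thread, caller_frame.id());
    JRT_END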
@@ -367,10 +367,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
+  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
-  debug_only(__ should_not_reach_here());
+  DEBUG_ONLY(__ should_not_reach_here());
 }
...
@@ -766,7 +766,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ ret();
         __ delayed()->restore();
+      }
+      break;
+    case deoptimize_id:
+      {
+        __ set_info("deoptimize", dont_gc_arguments);
+        OopMap* oop_map = save_live_registers(sasm);
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+        restore_live_registers(sasm);
+        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
+        __ jump_to(dest, O0);
+        __ delayed()->restore();
       }
       break;
...
@@ -387,9 +387,9 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
   ce->add_call_info_here(_info);
-  debug_only(__ should_not_reach_here());
+  DEBUG_ONLY(__ should_not_reach_here());
 }
...
@@ -1447,7 +1447,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         oop_maps = new OopMapSet();
         oop_maps->add_gc_map(call_offset, map);
         restore_live_registers(sasm, save_fpu_registers);
+      }
+      break;
+    case deoptimize_id:
+      {
+        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
+        const int num_rt_args = 1;  // thread
+        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+        restore_live_registers(sasm);
+        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+        __ leave();
+        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
       }
       break;
...
@@ -681,6 +681,23 @@ JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
 }
 JRT_END
 
+// Cf. OptoRuntime::deoptimize_caller_frame
+JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
+  // Called from within the owner thread, so no need for safepoint
+  RegisterMap reg_map(thread, false);
+  frame stub_frame = thread->last_frame();
+  assert(stub_frame.is_runtime_frame(), "sanity check");
+  frame caller_frame = stub_frame.sender(&reg_map);
+  // We are coming from a compiled method; check this is true.
+  assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");
+  // Deoptimize the caller frame.
+  Deoptimization::deoptimize_frame(thread, caller_frame.id());
+  // Return to the now deoptimized frame.
+JRT_END
+
 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
   Bytecode_field field_access(caller, bci);
...
@@ -63,6 +63,7 @@ class StubAssembler;
   stub(monitorenter_nofpu) /* optimized version that does not preserve fpu registers */ \
   stub(monitorexit) \
   stub(monitorexit_nofpu) /* optimized version that does not preserve fpu registers */ \
+  stub(deoptimize) \
   stub(access_field_patching) \
   stub(load_klass_patching) \
   stub(g1_pre_barrier_slow) \
@@ -152,6 +153,8 @@ class Runtime1: public AllStatic {
   static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock);
   static void monitorexit (JavaThread* thread, BasicObjectLock* lock);
 
+  static void deoptimize(JavaThread* thread);
+
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
...
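
For context on how the single stub(deoptimize) line above becomes the Runtime1::deoptimize_id value used by the code stubs: the stub list is an X-macro, expanded once to declare the StubID enum (and again for stub names, generated entry points, and so on), and entry_for(id) then hands back the generated blob's entry address. Below is a self-contained sketch of that pattern with made-up stub and macro names; HotSpot's actual list and helper macros differ, but the mechanism (each stub(foo) entry expanding to a foo_id enumerator) is the same:

    #include <cstdio>

    // Hypothetical stub list in the style of the RUNTIME1_STUBS macro above
    // (the names here are invented for this sketch).
    #define MY_STUBS(stub)        \
      stub(monitorenter)          \
      stub(monitorexit)           \
      stub(deoptimize)            \
      stub(load_klass_patching)

    // One expansion declares the ids: each stub(foo) becomes foo_id.
    #define DECLARE_STUB_ID(name) name##_id,
    enum StubID { MY_STUBS(DECLARE_STUB_ID) number_of_ids };

    // Another expansion produces printable names for the same ids.
    #define DECLARE_STUB_NAME(name) #name,
    static const char* stub_names[] = { MY_STUBS(DECLARE_STUB_NAME) };

    int main() {
      // Adding stub(deoptimize) to the list is all it takes to get deoptimize_id.
      std::printf("deoptimize_id = %d (%s), total stubs = %d\n",
                  deoptimize_id, stub_names[deoptimize_id], number_of_ids);
      return 0;
    }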
@@ -1130,7 +1130,7 @@ void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
     assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
     frame caller_frame = stub_frame.sender(&reg_map);
-    // bypass VM_DeoptimizeFrame and deoptimize the frame directly
+    // Deoptimize the caller frame.
     Deoptimization::deoptimize_frame(thread, caller_frame.id());
   }
 }
...