Commit eb144b4a authored by roland

7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked

Summary: Do patching rather than bailing out for an unlinked call with an appendix
Reviewed-by: twisti, kvn
Parent 55987f81
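Background for this change: `MethodHandle.invokeExact()` and `invoke()` calls compile to `invokehandle` instructions, and `invokedynamic` instructions may carry an appendix argument (a `MethodType`, `CallSite`, etc.). If such a call site has never executed when C1 compiles the enclosing method, it is still unlinked and the appendix constant is unknown, so C1 previously bailed out on the whole method. With this change, C1 emits a patching stub instead and resolves the call site on first execution. A minimal, hypothetical Java sketch of the scenario (class and method names are invented):

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class UnlinkedCallSite {
    static final MethodHandle MH;
    static {
        try {
            MH = MethodHandles.lookup().findStatic(
                    UnlinkedCallSite.class, "target",
                    MethodType.methodType(int.class, int.class));
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    static int target(int x) { return x + 1; }

    static int hot(boolean rare, int x) throws Throwable {
        int sum = x;
        for (int i = 0; i < 100_000; i++) {   // makes hot() eligible for C1 compilation
            sum += i;
        }
        if (rare) {                           // never taken before compilation, so the
            sum += (int) MH.invokeExact(x);   // invokehandle call site here is unlinked
        }
        return sum;
    }
}
```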
@@ -307,7 +307,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
...
@@ -520,7 +520,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
...
@@ -804,6 +804,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
...
@@ -402,6 +402,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
...
@@ -362,7 +362,7 @@ int LIR_Assembler::check_icache() {
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
...
@@ -1499,6 +1499,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
      break;

+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
...
@@ -364,7 +364,8 @@ class PatchingStub: public CodeStub {
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@ class PatchingStub: public CodeStub {
     }
     NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
     n_move->set_offset(field_offset);
-  } else if (_id == load_klass_id || _id == load_mirror_id) {
+  } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
     // verify that we're pointing at a NativeMovConstReg
...
@@ -1667,9 +1667,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");

-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }

   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }

   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1744,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1850,7 +1863,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic ||
@@ -1901,7 +1915,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
                       code == Bytecodes::_invokespecial   ||
                       code == Bytecodes::_invokevirtual   ||
                       code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
   int vtable_index = Method::invalid_vtable_index;
...
@@ -1211,8 +1211,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
   bool is_invokedynamic() const { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
...
@@ -93,12 +93,23 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
     default:
       ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }

+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
+
 //---------------------------------------------------------------
...
@@ -119,6 +119,8 @@ class LIR_Assembler: public CompilationResourceObj {
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);

+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();
...
@@ -819,6 +819,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
@@ -888,10 +889,32 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
       }
       // convert to handle
       load_klass = KlassHandle(THREAD, k);
+    } else if (stub_id == load_appendix_patching_id) {
+      Bytecode_invoke bytecode(caller_method, bci);
+      Bytecodes::Code bc = bytecode.invoke_code();
+
+      CallInfo info;
+      constantPoolHandle pool(thread, caller_method->constants());
+      int index = bytecode.index();
+      LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+      appendix = info.resolved_appendix();
+      switch (bc) {
+        case Bytecodes::_invokehandle: {
+          int cache_index = ConstantPool::decode_cpcache_index(index, true);
+          assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+          pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+          break;
+        }
+        case Bytecodes::_invokedynamic: {
+          pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+          break;
+        }
+        default: fatal("unexpected bytecode for load_appendix_patching_id");
+      }
     } else {
       ShouldNotReachHere();
     }
@@ -992,8 +1015,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
                  "illegal init value");
           if (stub_id == Runtime1::load_klass_patching_id) {
-          assert(load_klass() != NULL, "klass not set");
-          n_copy->set_data((intx) (load_klass()));
+            assert(load_klass() != NULL, "klass not set");
+            n_copy->set_data((intx) (load_klass()));
           } else {
             assert(mirror() != NULL, "klass not set");
             n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
         if (TracePatching) {
           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
         }
-      }
-#if defined(SPARC) || defined(PPC)
-      // Update the location in the nmethod with the proper
-      // metadata.  When the code was generated, a NULL was stuffed
-      // in the metadata table and that table needs to be update to
-      // have the right value.  On intel the value is kept
-      // directly in the instruction instead of in the metadata
-      // table, so set_data above effectively updated the value.
-      nmethod* nm = CodeCache::find_nmethod(instr_pc);
-      assert(nm != NULL, "invalid nmethod_pc");
-      RelocIterator mds(nm, copy_buff, copy_buff + 1);
-      bool found = false;
-      while (mds.next() && !found) {
-        if (mds.type() == relocInfo::oop_type) {
-          assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-          oop_Relocation* r = mds.oop_reloc();
-          oop* oop_adr = r->oop_addr();
-          *oop_adr = mirror();
-          r->fix_oop_relocation();
-          found = true;
-        } else if (mds.type() == relocInfo::metadata_type) {
-          assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-          metadata_Relocation* r = mds.metadata_reloc();
-          Metadata** metadata_adr = r->metadata_addr();
-          *metadata_adr = load_klass();
-          r->fix_metadata_relocation();
-          found = true;
-        }
-      }
-      assert(found, "the metadata must exist!");
-#endif
+      } else if (stub_id == Runtime1::load_appendix_patching_id) {
+        NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+        assert(n_copy->data() == 0 ||
+               n_copy->data() == (intptr_t)Universe::non_oop_word(),
+               "illegal init value");
+        n_copy->set_data((intx) (appendix()));
+
+        if (TracePatching) {
+          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
+        }
       } else {
         ShouldNotReachHere();
       }
+#if defined(SPARC) || defined(PPC)
+      if (load_klass_or_mirror_patch_id ||
+          stub_id == Runtime1::load_appendix_patching_id) {
+        // Update the location in the nmethod with the proper
+        // metadata.  When the code was generated, a NULL was stuffed
+        // in the metadata table and that table needs to be update to
+        // have the right value.  On intel the value is kept
+        // directly in the instruction instead of in the metadata
+        // table, so set_data above effectively updated the value.
+        nmethod* nm = CodeCache::find_nmethod(instr_pc);
+        assert(nm != NULL, "invalid nmethod_pc");
+        RelocIterator mds(nm, copy_buff, copy_buff + 1);
+        bool found = false;
+        while (mds.next() && !found) {
+          if (mds.type() == relocInfo::oop_type) {
+            assert(stub_id == Runtime1::load_mirror_patching_id ||
+                   stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+            oop_Relocation* r = mds.oop_reloc();
+            oop* oop_adr = r->oop_addr();
+            *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+            r->fix_oop_relocation();
+            found = true;
+          } else if (mds.type() == relocInfo::metadata_type) {
+            assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+            metadata_Relocation* r = mds.metadata_reloc();
+            Metadata** metadata_adr = r->metadata_addr();
+            *metadata_adr = load_klass();
+            r->fix_metadata_relocation();
+            found = true;
+          }
+        }
+        assert(found, "the metadata must exist!");
+      }
+#endif
       if (do_patch) {
         // replace instructions
         // first replace the tail, then the call
@@ -1077,7 +1112,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
           ICache::invalidate_range(instr_pc, *byte_count);
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
-          if (load_klass_or_mirror_patch_id) {
+          if (load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
         // If we are patching in a non-perm oop, make sure the nmethod
         // is on the right list.
-        if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+        if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                                    (appendix.not_null() && appendix->is_scavengable()))) {
           MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
           nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
           guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@ int Runtime1::move_mirror_patching(JavaThread* thread) {
   return caller_is_deopted();
 }

+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
+
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
...
@@ -67,6 +67,7 @@ class StubAssembler;
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@ class Runtime1: public AllStatic {
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
   static void patch_code(JavaThread* thread, StubID stub_id);
...
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"

-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
@@ -54,7 +54,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
   \
   /* Printing */ \
   notproduct(bool, PrintC1Statistics, false, \
@@ -333,15 +333,19 @@
           "Use CHA and exact type results at call sites when updating MDOs") \
                                                                              \
   product(bool, C1UpdateMethodData, trueInTiered,                            \
           "Update MethodData*s in Tier1-generated code")                     \
                                                                              \
   develop(bool, PrintCFGToFile, false,                                       \
           "print control flow graph to a separate file during compilation")  \
                                                                              \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                               \
+             "Patch invokedynamic appendix not known at compile time")       \
+                                                                             \
+                                                                             \

 // Read default values for c1 globals
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)

 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
@@ -1150,6 +1150,10 @@ void ciEnv::record_out_of_memory_failure() {
   record_method_not_compilable("out of memory");
 }

+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
 void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
...
@@ -400,6 +400,7 @@ public:
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();

   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note: To find a class from its name string, use ciSymbol::make,
...
@@ -177,6 +177,10 @@ class ciMethod : public ciMetadata {
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
...
@@ -563,7 +563,10 @@ ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signatu
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }

+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}

 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
...
@@ -131,6 +131,8 @@ public:
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);

+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();
...
@@ -205,6 +205,7 @@ void Flag::print_as_flag(outputStream* st) {
 #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
 #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
 #ifdef PRODUCT
 #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
 #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -260,7 +261,7 @@ static Flag flagTable[] = {
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
...
@@ -57,6 +57,7 @@
 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
 #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)        FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
 #ifdef PRODUCT
 #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)    /* flag is constant */
 #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)        /* flag is constant */
@@ -99,7 +100,7 @@ typedef enum {
 G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
 C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@ typedef enum {
 #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
 #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    /* flag is constant */
 #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)        /* flag is constant */
@@ -204,6 +206,7 @@ typedef enum {
          C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
          C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
          C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+         C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
          C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
 #ifdef COMPILER2
...
@@ -1051,7 +1051,8 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
 #endif

   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                           is_optimized, static_bound, virtual_call_info,
                                           CHECK_(methodHandle()));
...
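Note: `C1PatchInvokeDynamic` is added as a diagnostic flag defaulting to `true`. Assuming a VM built from this change, the old bailout behavior can be restored for testing by unlocking diagnostic options and turning the flag off:

    java -XX:+UnlockDiagnosticVMOptions -XX:-C1PatchInvokeDynamic ...

With the flag off, `GraphBuilder::invoke()` bails out on unlinked appendix call sites as before (see the `BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)")` path above).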