Commit f7d6edac authored by never

6939861: JVM should handle more conversion operations

Reviewed-by: twisti, jrose
Parent e7645472
......@@ -234,6 +234,20 @@ class Address VALUE_OBJ_CLASS_SPEC {
a._disp += disp;
return a;
}
Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
Address a = (*this);
a._disp += disp.constant_or_zero() * scale_size(scale);
if (disp.is_register()) {
assert(!a.index()->is_valid(), "competing indexes");
a._index = disp.as_register();
a._scale = scale;
}
return a;
}
bool is_same_address(Address a) const {
// disregard _rspec
return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
}
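The new plus_disp overload folds a RegisterOrConstant displacement into an existing Address: a constant contribution is scaled and added to _disp, while a register contribution becomes the index register with the given scale (asserting that no index was already in use). A minimal usage sketch, not part of the patch; the helper name and registers are hypothetical:

// Hedged sketch: form an element address when the element offset is sometimes
// a compile-time constant and sometimes lives in a register.
Address element_addr(Register base_reg, RegisterOrConstant offset) {
  Address base(base_reg, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  // A constant offset is scaled and folded into the displacement; a register
  // offset becomes the (previously unused) index register, scaled by times_ptr.
  return base.plus_disp(offset, Address::times_ptr);
}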
// The following two overloads are used in connection with the
// ByteSize type (see sizes.hpp). They simplify the use of
......@@ -2029,6 +2043,10 @@ class MacroAssembler: public Assembler {
void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
void addptr(Register dst, int32_t src);
void addptr(Register dst, Register src);
void addptr(Register dst, RegisterOrConstant src) {
if (src.is_constant()) addptr(dst, (int) src.as_constant());
else addptr(dst, src.as_register());
}
void andptr(Register dst, int32_t src);
void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
......@@ -2090,7 +2108,10 @@ class MacroAssembler: public Assembler {
void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
void subptr(Register dst, int32_t src);
void subptr(Register dst, Register src);
void subptr(Register dst, RegisterOrConstant src) {
if (src.is_constant()) subptr(dst, (int) src.as_constant());
else subptr(dst, src.as_register());
}
void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
......@@ -2288,6 +2309,11 @@ public:
void movptr(Address dst, Register src);
void movptr(Register dst, RegisterOrConstant src) {
if (src.is_constant()) movptr(dst, src.as_constant());
else movptr(dst, src.as_register());
}
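The RegisterOrConstant overloads of addptr, subptr and movptr all follow the same dispatch: emit the immediate form when the operand currently holds a constant, otherwise the register form, with LP64_ONLY/NOT_LP64 selecting the 64-bit or 32-bit instruction. A hedged usage sketch, illustration only; the helper and register names are not from the patch:

// Hedged sketch: adjust a slot pointer by an amount that is either a known
// constant or a value only available in a register at run time.
void bump_slot_pointer(MacroAssembler* _masm, Register rax_argslot,
                       RegisterOrConstant arg_slots) {
  _masm->addptr(rax_argslot, arg_slots);  // constant or register, decided by is_constant()
  _masm->subptr(rax_argslot, wordSize);   // the plain int32_t overload still applies
}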
#ifdef _LP64
// Generally the next two are only used for moving NULL
// Although there are situations in initializing the mark word where
......
......@@ -339,7 +339,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return fr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
......@@ -361,41 +360,35 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t* sender_sp = this->sender_sp();
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();
// Stored FP.
intptr_t* saved_fp = link();
address sender_pc = this->sender_pc();
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
if (sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
_unextended_sp = _fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
else if (sender_nm->is_deopt_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
else if (sender_nm->is_method_handle_return(_pc)) {
_unextended_sp = _fp;
}
}
}
//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
// The interpreter and compiler(s) always save EBP/RBP in a known
// location on entry. We must record where that location is
// so that if EBP/RBP was live on a callout from c2 we can find
......@@ -404,22 +397,36 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// Since the interpreter always saves EBP/RBP, if we record where it is then
// we don't have to always save EBP/RBP on entry and exit to c2 compiled
// code; saving it on entry will be enough.
#ifdef COMPILER2
if (map->update_map()) {
map->set_location(rbp->as_VMReg(), (address) addr_at(link_offset));
map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
// this is weird: "H" ought to be at a higher address, however the
// oopMaps seem to have the "H" regs at the same address as the
// vanilla register.
// XXXX make this go away
if (true) {
map->set_location(rbp->as_VMReg()->next(), (address)addr_at(link_offset));
}
// this is weird: "H" ought to be at a higher address, however the
// oopMaps seem to have the "H" regs at the same address as the
// vanilla register.
// XXXX make this go away
if (true) {
map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
}
#endif // AMD64
}
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t* sender_sp = this->sender_sp();
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();
#ifdef COMPILER2
if (map->update_map()) {
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
}
#endif // COMPILER2
return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
return frame(sender_sp, unextended_sp, link(), sender_pc());
}
......@@ -427,6 +434,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(!is_ricochet_frame(), "caller must handle this");
// frame owned by optimizing compiler
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
......@@ -438,31 +446,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// This is the saved value of EBP which may or may not really be an FP.
// It is only an FP if the sender is an interpreter frame (or C1?).
intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
}
}
intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
......@@ -472,23 +456,15 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
if (_cb->oop_maps() != NULL) {
OopMapSet::update_register_map(this, map);
}
// Since the prolog does the save and restore of EBP there is no oopmap
// for it so we must fill in its location as if there was an oopmap entry
// since if our caller was compiled code there could be live jvm state in it.
map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
#ifdef AMD64
// this is weird: "H" ought to be at a higher address, however the
// oopMaps seem to have the "H" regs at the same address as the
// vanilla register.
// XXXX make this go away
if (true) {
map->set_location(rbp->as_VMReg()->next(), (address) (sender_sp - frame::sender_sp_offset));
}
#endif // AMD64
update_map_with_saved_link(map, saved_fp_addr);
}
assert(sender_sp != sp(), "must have changed");
return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
......@@ -502,6 +478,7 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (is_ricochet_frame()) return sender_for_ricochet_frame(map);
if (_cb != NULL) {
return sender_for_compiled_frame(map);
......
......@@ -164,6 +164,7 @@
// original sp we use that convention.
intptr_t* _unextended_sp;
void adjust_unextended_sp();
intptr_t* ptr_at_addr(int offset) const {
return (intptr_t*) addr_at(offset);
......@@ -197,6 +198,9 @@
// expression stack tos if we are nested in a java call
intptr_t* interpreter_frame_last_sp() const;
// helper to update a map with callee-saved RBP
static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
#ifndef CC_INTERP
// deoptimization support
void interpreter_frame_set_last_sp(intptr_t* sp);
......
......@@ -62,6 +62,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_pc = pc;
assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
......
......@@ -26,7 +26,9 @@
#define CPU_X86_VM_INTERPRETER_X86_HPP
public:
static Address::ScaleFactor stackElementScale() { return Address::times_4; }
static Address::ScaleFactor stackElementScale() {
return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
}
// Offset from rsp (which points to the last stack element)
static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
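On this platform the scale factor now tracks the interpreter's stack element size: times_4 when stackElementSize is 4 (32-bit), times_8 when it is 8 (LP64), so indexed addressing and expr_offset_in_bytes agree. A small sanity sketch, assuming the standard x86 ScaleFactor encoding (times_4 == 2, times_8 == 3); it is not part of the patch:

// Hedged sketch: (1 << scale) must equal the byte size of one stack element.
static void check_stack_element_scale() {
  int scale_bytes = 1 << Interpreter::stackElementScale();   // 4 on 32-bit, 8 on LP64
  assert(scale_bytes == Interpreter::stackElementSize, "scale/size mismatch");
  assert(Interpreter::expr_offset_in_bytes(2) == 2 * Interpreter::stackElementSize, "sanity");
}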
......
/*
* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.
public:
// The stack just after the recursive call from a ricochet frame
// looks something like this. Offsets are marked in words, not bytes.
// rsi (r13 on LP64) is part of the interpreter calling sequence
// which tells the callee where my real rsp is (for frame walking).
// (...lower memory addresses)
// rsp: [ return pc ] always the global RicochetBlob::bounce_addr
// rsp+1: [ recursive arg N ]
// rsp+2: [ recursive arg N-1 ]
// ...
// rsp+N: [ recursive arg 1 ]
// rsp+N+1: [ recursive method handle ]
// ...
// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame)
// rbp-5: [ saved target MH ] the MH we will call on the saved args
// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout
// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0)
// rbp-2: [ conversion ] information about how the return value is used
// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame
// rbp+0: [ saved sender fp ] (for original sender of AMH)
// rbp+1: [ saved sender pc ] (back to original sender of AMH)
// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender)
// rbp+3: [ transformed adapter arg M-1]
// ...
// rbp+M+1: [ transformed adapter arg 1 ]
// rbp+M+2: [ padding ] <-- (rbp + saved args base offset)
// ... [ optional padding]
// (higher memory addresses...)
//
// The arguments originally passed by the original sender
// are lost, and arbitrary amounts of stack motion might have
// happened due to argument transformation.
// (This is done by C2I/I2C adapters and non-direct method handles.)
// This is why there is an unpredictable amount of memory between
// the extended and exact TOS of the sender.
// The ricochet adapter itself will also (in general) perform
// transformations before the recursive call.
//
// The transformed and saved arguments, immediately above the saved
// return PC, are a well-formed method handle invocation ready to execute.
// When the GC needs to walk the stack, these arguments are described
// via the saved arg types oop, an int[] array with a private format.
// This array is derived from the type of the transformed adapter
// method handle, which also sits at the base of the saved argument
// bundle. Since the GC may not be able to fish out the int[]
// array, it is pushed explicitly on the stack. This may be
// an unnecessary expense.
//
// The following register conventions are significant at this point:
// rsp the thread stack, as always; preserved by caller
// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
// rcx recursive method handle (contents of [rsp+N+1])
// rbp preserved by caller (not used by caller)
// Unless otherwise specified, all registers can be blown by the call.
//
// If this frame must be walked, the transformed adapter arguments
// will be found with the help of the saved arguments descriptor.
//
// Therefore, the descriptor must match the referenced arguments.
// The arguments must be followed by at least one word of padding,
// which will be necessary to complete the final method handle call.
// That word is not treated as holding an oop.
//
// The word pointed to by the return argument pointer is not
// treated as an oop, even if it points to a saved argument.
// This allows the saved argument list to have a "hole" in it
// to receive an oop from the recursive call.
// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
//
// When the recursive callee returns, RicochetBlob::bounce_addr will
// immediately jump to the continuation stored in the RF.
// This continuation will merge the recursive return value
// into the saved argument list. At that point, the original
// rsi, rbp, and rsp will be reloaded, the ricochet frame will
// disappear, and the final target of the adapter method handle
// will be invoked on the transformed argument list.
class RicochetFrame {
friend class MethodHandles;
private:
intptr_t* _continuation; // what to do when control gets back here
oopDesc* _saved_target; // target method handle to invoke on saved_args
oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3)
intptr_t _conversion; // misc. information from original AdapterMethodHandle (-2)
intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1)
intptr_t* _sender_link; // *must* coincide with frame::link_offset (0)
address _sender_pc; // *must* coincide with frame::return_addr_offset (1)
public:
intptr_t* continuation() const { return _continuation; }
oop saved_target() const { return _saved_target; }
oop saved_args_layout() const { return _saved_args_layout; }
intptr_t* saved_args_base() const { return _saved_args_base; }
intptr_t conversion() const { return _conversion; }
intptr_t* exact_sender_sp() const { return _exact_sender_sp; }
intptr_t* sender_link() const { return _sender_link; }
address sender_pc() const { return _sender_pc; }
intptr_t* extended_sender_sp() const { return saved_args_base(); }
intptr_t return_value_slot_number() const {
return adapter_conversion_vminfo(conversion());
}
BasicType return_value_type() const {
return adapter_conversion_dest_type(conversion());
}
bool has_return_value_slot() const {
return return_value_type() != T_VOID;
}
intptr_t* return_value_slot_addr() const {
assert(has_return_value_slot(), "");
return saved_arg_slot_addr(return_value_slot_number());
}
intptr_t* saved_target_slot_addr() const {
return saved_arg_slot_addr(saved_args_length());
}
intptr_t* saved_arg_slot_addr(int slot) const {
assert(slot >= 0, "");
return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
}
jint saved_args_length() const;
jint saved_arg_offset(int arg) const;
// GC interface
oop* saved_target_addr() { return (oop*)&_saved_target; }
oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; }
oop compute_saved_args_layout(bool read_cache, bool write_cache);
// Compiler/assembler interface.
static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); }
static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); }
static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); }
static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); }
static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); }
static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); }
static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); }
static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); }
// This value is not used for much, but it apparently must be nonzero.
static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); }
#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
static int magic_number_1_offset_in_bytes() { return -wordSize; }
static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); }
intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); };
intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); };
#endif //ASSERT
enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
static void verify_offsets() NOT_DEBUG_RETURN;
void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
void zap_arguments() NOT_DEBUG_RETURN;
static void generate_ricochet_blob(MacroAssembler* _masm,
// output params:
int* frame_size_in_words, int* bounce_offset, int* exception_offset);
static void enter_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register rax_argv,
address return_handler,
Register rbx_temp);
static void leave_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register new_sp_reg,
Register sender_pc_reg);
static Address frame_address(int offset = 0) {
// The RicochetFrame is found by subtracting a constant offset from rbp.
return Address(rbp, - sender_link_offset_in_bytes() + offset);
}
static RicochetFrame* from_frame(const frame& fr) {
address bp = (address) fr.fp();
RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
rf->verify();
return rf;
}
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
};
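As a reading aid only: from_frame() positions the RicochetFrame so that _sender_link coincides with the frame's saved-fp slot, and the remaining fields sit at the negative rbp-relative offsets shown in the layout comment above. A hedged sketch of walking those fields through the class's own accessors; the printing helper itself is hypothetical and not part of the patch:

// Hedged sketch (illustration only): dump a few RicochetFrame fields for a
// frame 'fr' already known to be a ricochet frame.
static void print_ricochet_frame(const frame& fr, outputStream* st) {
  MethodHandles::RicochetFrame* rf = MethodHandles::RicochetFrame::from_frame(fr);
  st->print_cr("continuation    = " PTR_FORMAT, (intptr_t) rf->continuation());
  st->print_cr("saved target MH = " PTR_FORMAT, (intptr_t) rf->saved_target());
  st->print_cr("conversion      = " INTX_FORMAT, rf->conversion());
  st->print_cr("exact sender sp = " PTR_FORMAT, (intptr_t) rf->exact_sender_sp());
  if (rf->has_return_value_slot()) {
    // may temporarily hold RETURN_VALUE_PLACEHOLDER until the recursive call returns
    st->print_cr("return slot     = " PTR_FORMAT, (intptr_t) rf->return_value_slot_addr());
  }
}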
// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
static void load_stack_move(MacroAssembler* _masm,
Register rdi_stack_move,
Register rcx_amh,
bool might_be_negative);
static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);
static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);
static void push_arg_slots(MacroAssembler* _masm,
Register rax_argslot,
RegisterOrConstant slot_count,
int skip_words_count,
Register rbx_temp, Register rdx_temp);
static void move_arg_slots_up(MacroAssembler* _masm,
Register rbx_bottom, // invariant
Address top_addr, // can use rax_temp
RegisterOrConstant positive_distance_in_slots,
Register rax_temp, Register rdx_temp);
static void move_arg_slots_down(MacroAssembler* _masm,
Address bottom_addr, // can use rax_temp
Register rbx_top, // invariant
RegisterOrConstant negative_distance_in_slots,
Register rax_temp, Register rdx_temp);
static void move_typed_arg(MacroAssembler* _masm,
BasicType type, bool is_element,
Address slot_dest, Address value_src,
Register rbx_temp, Register rdx_temp);
static void move_return_value(MacroAssembler* _masm, BasicType type,
Address return_slot);
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_argslots(MacroAssembler* _masm,
RegisterOrConstant argslot_count,
Register argslot_reg,
bool negate_argslot,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_stack_move(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int direction) NOT_DEBUG_RETURN;
static void verify_klass(MacroAssembler* _masm,
Register obj, KlassHandle klass,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
"reference is a MH");
}
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
static Register saved_last_sp_register() {
// Should be in sharedRuntime, not here.
return LP64_ONLY(r13) NOT_LP64(rsi);
}
......@@ -2253,6 +2253,31 @@ uint SharedRuntime::out_preserve_stack_slots() {
return 0;
}
//----------------------------generate_ricochet_blob---------------------------
void SharedRuntime::generate_ricochet_blob() {
if (!EnableInvokeDynamic) return; // leave it as a null
// allocate space for the code
ResourceMark rm;
// setup code generation tools
CodeBuffer buffer("ricochet_blob", 256, 256);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
// -------------
// make sure all code is generated
masm->flush();
// failed to generate?
if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
assert(false, "bad ricochet blob");
return;
}
_ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
}
//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
......@@ -2996,6 +3021,8 @@ void SharedRuntime::generate_stubs() {
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), true);
generate_ricochet_blob();
generate_deopt_blob();
#ifdef COMPILER2
generate_uncommon_trap_blob();
......
......@@ -2530,6 +2530,32 @@ uint SharedRuntime::out_preserve_stack_slots() {
}
//----------------------------generate_ricochet_blob---------------------------
void SharedRuntime::generate_ricochet_blob() {
if (!EnableInvokeDynamic) return; // leave it as a null
// allocate space for the code
ResourceMark rm;
// setup code generation tools
CodeBuffer buffer("ricochet_blob", 512, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
// -------------
// make sure all code is generated
masm->flush();
// failed to generate?
if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
assert(false, "bad ricochet blob");
return;
}
_ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
}
//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code
......@@ -3205,6 +3231,8 @@ void SharedRuntime::generate_stubs() {
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), true);
generate_ricochet_blob();
generate_deopt_blob();
#ifdef COMPILER2
......
......@@ -36,7 +36,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 10000
method_handles_adapters_code_size = 30000 DEBUG_ONLY(+ 10000)
};
class x86 {
......
......@@ -38,7 +38,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 40000
method_handles_adapters_code_size = 80000 DEBUG_ONLY(+ 120000)
};
class x86 {
......
......@@ -2602,6 +2602,7 @@ int java_lang_invoke_MethodType::ptype_count(oop mt) {
// Support for java_lang_invoke_MethodTypeForm
int java_lang_invoke_MethodTypeForm::_vmslots_offset;
int java_lang_invoke_MethodTypeForm::_vmlayout_offset;
int java_lang_invoke_MethodTypeForm::_erasedType_offset;
int java_lang_invoke_MethodTypeForm::_genericInvoker_offset;
......@@ -2609,6 +2610,7 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() {
klassOop k = SystemDictionary::MethodTypeForm_klass();
if (k != NULL) {
compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true);
compute_optional_offset(_vmlayout_offset, k, vmSymbols::vmlayout_name(), vmSymbols::object_signature());
compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true);
compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true);
if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value
......@@ -2617,9 +2619,31 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() {
int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
assert(_vmslots_offset > 0, "");
return mtform->int_field(_vmslots_offset);
}
oop java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
assert(_vmlayout_offset > 0, "");
return mtform->obj_field(_vmlayout_offset);
}
oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
oop previous = vmlayout(mtform);
if (previous != NULL) {
return previous; // someone else beat us to it
}
HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr<oop>(_vmlayout_offset);
OrderAccess::storestore(); // make sure our copy is fully committed
previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous);
if (previous != NULL) {
return previous; // someone else beat us to it
}
return cookie;
}
oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
return mtform->obj_field(_erasedType_offset);
......
......@@ -949,18 +949,19 @@ class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodH
OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument
OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another
OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive
OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper (NYI)
OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper
OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg)
OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg)
OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS)
OP_DROP_ARGS = 0x9, // remove one or more argument slots
OP_COLLECT_ARGS = 0xA, // combine one or more arguments into a varargs (NYI)
OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function
OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size)
OP_FLYBY = 0xC, // operate first on reified argument list (NYI)
OP_RICOCHET = 0xD, // run an adapter chain on the return value (NYI)
OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result
//OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists
CONV_OP_LIMIT = 0xE, // limit of CONV_OP enumeration
CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field
CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below
CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use
CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK
CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK
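The mask and shift constants above describe how an AdapterMethodHandle packs its conversion word. A hedged decoding sketch, not part of the patch; 'conv' is a hypothetical packed value, and the dest-type and stack-move fields live in higher bits outside this hunk:

// Hedged sketch: unpack the conversion-op nybble and the JVM-private vminfo byte.
static int conv_op_of(int conv)     { return (conv & CONV_OP_MASK) >> CONV_OP_SHIFT; }       // e.g. OP_FOLD_ARGS == 0xC
static int conv_vminfo_of(int conv) { return (conv & CONV_VMINFO_MASK) >> CONV_VMINFO_SHIFT; }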
......@@ -1089,6 +1090,7 @@ class java_lang_invoke_MethodTypeForm: AllStatic {
private:
static int _vmslots_offset; // number of argument slots needed
static int _vmlayout_offset; // object describing internal calling sequence
static int _erasedType_offset; // erasedType = canonical MethodType
static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric
......@@ -1100,8 +1102,12 @@ class java_lang_invoke_MethodTypeForm: AllStatic {
static oop erasedType(oop mtform);
static oop genericInvoker(oop mtform);
static oop vmlayout(oop mtform);
static oop init_vmlayout(oop mtform, oop cookie);
// Accessors for code generation:
static int vmslots_offset_in_bytes() { return _vmslots_offset; }
static int vmlayout_offset_in_bytes() { return _vmlayout_offset; }
static int erasedType_offset_in_bytes() { return _erasedType_offset; }
static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; }
};
......
......@@ -2362,8 +2362,15 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
if (spe == NULL)
spe = invoke_method_table()->add_entry(index, hash, signature, name_id);
if (spe->property_oop() == NULL)
if (spe->property_oop() == NULL) {
spe->set_property_oop(m());
// Link m to its method type, if it is suitably generic.
oop mtform = java_lang_invoke_MethodType::form(mt());
if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform)
&& java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) {
java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m());
}
}
} else {
non_cached_result = m;
}
......
......@@ -341,6 +341,7 @@
template(vmtarget_name, "vmtarget") \
template(vmentry_name, "vmentry") \
template(vmslots_name, "vmslots") \
template(vmlayout_name, "vmlayout") \
template(vmindex_name, "vmindex") \
template(vmargslot_name, "vmargslot") \
template(flags_name, "flags") \
......@@ -393,6 +394,7 @@
template(void_signature, "V") \
template(byte_array_signature, "[B") \
template(char_array_signature, "[C") \
template(int_array_signature, "[I") \
template(object_void_signature, "(Ljava/lang/Object;)V") \
template(object_int_signature, "(Ljava/lang/Object;)I") \
template(object_boolean_signature, "(Ljava/lang/Object;)Z") \
......
......@@ -152,6 +152,32 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
}
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
// Do not hold the CodeCache lock during name formatting.
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
if (stub != NULL) {
char stub_id[256];
assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
Disassembler::decode(stub->code_begin(), stub->code_end());
}
Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
const char* stub_name = name2;
if (name2[0] == '\0') stub_name = name1;
JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
}
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
......@@ -312,23 +338,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
}
// Do not hold the CodeCache lock during name formatting.
if (stub != NULL) {
char stub_id[256];
jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name);
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
Disassembler::decode(stub->code_begin(), stub->code_end());
}
Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(stub, "RuntimeStub - ", stub_name);
return stub;
}
......@@ -340,6 +350,50 @@ void* RuntimeStub::operator new(size_t s, unsigned size) {
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) {
void* p = CodeCache::allocate(size);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
//----------------------------------------------------------------------------------------------------
// Implementation of RicochetBlob
RicochetBlob::RicochetBlob(
CodeBuffer* cb,
int size,
int bounce_offset,
int exception_offset,
int frame_size
)
: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL)
{
_bounce_offset = bounce_offset;
_exception_offset = exception_offset;
}
RicochetBlob* RicochetBlob::create(
CodeBuffer* cb,
int bounce_offset,
int exception_offset,
int frame_size)
{
RicochetBlob* blob = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(RicochetBlob));
blob = new (size) RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size);
}
trace_new_stub(blob, "RicochetBlob");
return blob;
}
//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob
......@@ -386,34 +440,12 @@ DeoptimizationBlob* DeoptimizationBlob::create(
frame_size);
}
// Do not hold the CodeCache lock during name formatting.
if (blob != NULL) {
char blob_id[256];
jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin());
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->code_begin(), blob->code_end());
}
Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "DeoptimizationBlob");
return blob;
}
void* DeoptimizationBlob::operator new(size_t s, unsigned size) {
void* p = CodeCache::allocate(size);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob
......@@ -441,33 +473,12 @@ UncommonTrapBlob* UncommonTrapBlob::create(
blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
}
// Do not hold the CodeCache lock during name formatting.
if (blob != NULL) {
char blob_id[256];
jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin());
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->code_begin(), blob->code_end());
}
Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "UncommonTrapBlob");
return blob;
}
void* UncommonTrapBlob::operator new(size_t s, unsigned size) {
void* p = CodeCache::allocate(size);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
#endif // COMPILER2
......@@ -498,33 +509,12 @@ ExceptionBlob* ExceptionBlob::create(
blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
}
// We do not need to hold the CodeCache lock during name formatting
if (blob != NULL) {
char blob_id[256];
jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin());
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->code_begin(), blob->code_end());
}
Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "ExceptionBlob");
return blob;
}
void* ExceptionBlob::operator new(size_t s, unsigned size) {
void* p = CodeCache::allocate(size);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
#endif // COMPILER2
......@@ -554,35 +544,12 @@ SafepointBlob* SafepointBlob::create(
blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
}
// We do not need to hold the CodeCache lock during name formatting.
if (blob != NULL) {
char blob_id[256];
jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin());
if (PrintStubCode) {
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->code_begin(), blob->code_end());
}
Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "SafepointBlob");
return blob;
}
void* SafepointBlob::operator new(size_t s, unsigned size) {
void* p = CodeCache::allocate(size);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
//----------------------------------------------------------------------------------------------------
// Verification and printing
......
......@@ -35,6 +35,7 @@
// Subtypes are:
// nmethod : Compiled Java methods (includes methods that call native code)
// RuntimeStub : Call to VM runtime methods
// RicochetBlob : Used for blocking MethodHandle adapters
// DeoptimizationBlob : Used for deoptimization
// ExceptionBlob : Used for stack unrolling
// SafepointBlob : Used to handle illegal instruction exceptions
......@@ -95,12 +96,13 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
void flush();
// Typing
virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; }
virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; }
virtual bool is_ricochet_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; }
virtual bool is_safepoint_stub() const { return false; }
virtual bool is_adapter_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
......@@ -182,6 +184,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
virtual void print_on(outputStream* st) const;
virtual void print_value_on(outputStream* st) const;
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = "");
// Print the comment associated with offset on stream, if there is one
virtual void print_block_comment(outputStream* stream, address block_begin) {
intptr_t offset = (intptr_t)(block_begin - code_begin());
......@@ -318,7 +323,11 @@ class RuntimeStub: public CodeBlob {
class SingletonBlob: public CodeBlob {
friend class VMStructs;
public:
protected:
void* operator new(size_t s, unsigned size);
public:
SingletonBlob(
const char* name,
CodeBuffer* cb,
......@@ -340,6 +349,50 @@ class SingletonBlob: public CodeBlob {
};
//----------------------------------------------------------------------------------------------------
// RicochetBlob
// Holds an arbitrary argument list indefinitely while Java code executes recursively.
class RicochetBlob: public SingletonBlob {
friend class VMStructs;
private:
int _bounce_offset;
int _exception_offset;
// Creation support
RicochetBlob(
CodeBuffer* cb,
int size,
int bounce_offset,
int exception_offset,
int frame_size
);
public:
// Creation
static RicochetBlob* create(
CodeBuffer* cb,
int bounce_offset,
int exception_offset,
int frame_size
);
// Typing
bool is_ricochet_stub() const { return true; }
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
address bounce_addr() const { return code_begin() + _bounce_offset; }
address exception_addr() const { return code_begin() + _exception_offset; }
bool returns_to_bounce_addr(address pc) const {
address bounce_pc = bounce_addr();
return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc);
}
};
//----------------------------------------------------------------------------------------------------
// DeoptimizationBlob
......@@ -363,8 +416,6 @@ class DeoptimizationBlob: public SingletonBlob {
int frame_size
);
void* operator new(size_t s, unsigned size);
public:
// Creation
static DeoptimizationBlob* create(
......@@ -378,7 +429,6 @@ class DeoptimizationBlob: public SingletonBlob {
// Typing
bool is_deoptimization_stub() const { return true; }
const DeoptimizationBlob *as_deoptimization_stub() const { return this; }
bool exception_address_is_unpack_entry(address pc) const {
address unpack_pc = unpack();
return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
......@@ -426,8 +476,6 @@ class UncommonTrapBlob: public SingletonBlob {
int frame_size
);
void* operator new(size_t s, unsigned size);
public:
// Creation
static UncommonTrapBlob* create(
......@@ -458,8 +506,6 @@ class ExceptionBlob: public SingletonBlob {
int frame_size
);
void* operator new(size_t s, unsigned size);
public:
// Creation
static ExceptionBlob* create(
......@@ -491,8 +537,6 @@ class SafepointBlob: public SingletonBlob {
int frame_size
);
void* operator new(size_t s, unsigned size);
public:
// Creation
static SafepointBlob* create(
......
......@@ -796,6 +796,7 @@ void CodeCache::print_internals() {
int nmethodCount = 0;
int runtimeStubCount = 0;
int adapterCount = 0;
int ricochetStubCount = 0;
int deoptimizationStubCount = 0;
int uncommonTrapStubCount = 0;
int bufferBlobCount = 0;
......@@ -840,6 +841,8 @@ void CodeCache::print_internals() {
}
} else if (cb->is_runtime_stub()) {
runtimeStubCount++;
} else if (cb->is_ricochet_stub()) {
ricochetStubCount++;
} else if (cb->is_deoptimization_stub()) {
deoptimizationStubCount++;
} else if (cb->is_uncommon_trap_stub()) {
......@@ -876,6 +879,7 @@ void CodeCache::print_internals() {
tty->print_cr("runtime_stubs: %d",runtimeStubCount);
tty->print_cr("adapters: %d",adapterCount);
tty->print_cr("buffer blobs: %d",bufferBlobCount);
tty->print_cr("ricochet_stubs: %d",ricochetStubCount);
tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
tty->print_cr("\nnmethod size distribution (non-zombie java)");
......
......@@ -283,10 +283,10 @@ void decode_env::print_address(address adr) {
st->print("Stub::%s", desc->name());
if (desc->begin() != adr)
st->print("%+d 0x%p",adr - desc->begin(), adr);
else if (WizardMode) st->print(" " INTPTR_FORMAT, adr);
else if (WizardMode) st->print(" " PTR_FORMAT, adr);
return;
}
st->print("Stub::<unknown> " INTPTR_FORMAT, adr);
st->print("Stub::<unknown> " PTR_FORMAT, adr);
return;
}
......@@ -314,8 +314,8 @@ void decode_env::print_address(address adr) {
}
}
// Fall through to a simple numeral.
st->print(INTPTR_FORMAT, (intptr_t)adr);
// Fall through to a simple (hexadecimal) numeral.
st->print(PTR_FORMAT, adr);
}
void decode_env::print_insn_labels() {
......@@ -326,7 +326,7 @@ void decode_env::print_insn_labels() {
cb->print_block_comment(st, p);
}
if (_print_pc) {
st->print(" " INTPTR_FORMAT ": ", (intptr_t) p);
st->print(" " PTR_FORMAT ": ", p);
}
}
......@@ -432,7 +432,7 @@ address decode_env::decode_instructions(address start, address end) {
void Disassembler::decode(CodeBlob* cb, outputStream* st) {
if (!load_library()) return;
decode_env env(cb, st);
env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb);
env.output()->print_cr("Decoding CodeBlob " PTR_FORMAT, cb);
env.decode_instructions(cb->code_begin(), cb->code_end());
}
......@@ -446,7 +446,7 @@ void Disassembler::decode(address start, address end, outputStream* st) {
void Disassembler::decode(nmethod* nm, outputStream* st) {
if (!load_library()) return;
decode_env env(nm, st);
env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm);
env.output()->print_cr("Decoding compiled method " PTR_FORMAT ":", nm);
env.output()->print_cr("Code:");
#ifdef SHARK
......@@ -478,9 +478,9 @@ void Disassembler::decode(nmethod* nm, outputStream* st) {
int offset = 0;
for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) {
if ((offset % 8) == 0) {
env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p));
env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p));
} else {
env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT, (intptr_t) p, offset, *((int32_t*) p));
env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT, p, offset, *((int32_t*) p));
}
}
}
......
......@@ -3158,6 +3158,9 @@ inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread,
if (fr->is_entry_frame()) {
last_entry_frame = fr;
}
if (fr->is_ricochet_frame()) {
fr->oops_ricochet_do(blk, vf->register_map());
}
}
vf = vf->sender();
......
......@@ -409,6 +409,11 @@ MethodHandleWalker::walk(TRAPS) {
break;
}
case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: { //NYI, may GC
lose("unimplemented", CHECK_(empty));
break;
}
case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: {
klassOop array_klass_oop = NULL;
BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(),
......@@ -452,9 +457,18 @@ MethodHandleWalker::walk(TRAPS) {
Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
// Spread out the array elements.
Bytecodes::Code aload_op = Bytecodes::_aaload;
if (element_type != T_OBJECT) {
lose("primitive array NYI", CHECK_(empty));
Bytecodes::Code aload_op = Bytecodes::_nop;
switch (element_type) {
case T_INT: aload_op = Bytecodes::_iaload; break;
case T_LONG: aload_op = Bytecodes::_laload; break;
case T_FLOAT: aload_op = Bytecodes::_faload; break;
case T_DOUBLE: aload_op = Bytecodes::_daload; break;
case T_OBJECT: aload_op = Bytecodes::_aaload; break;
case T_BOOLEAN: // fall through:
case T_BYTE: aload_op = Bytecodes::_baload; break;
case T_CHAR: aload_op = Bytecodes::_caload; break;
case T_SHORT: aload_op = Bytecodes::_saload; break;
default: lose("primitive array NYI", CHECK_(empty));
}
int ap = arg_slot;
for (int i = 0; i < spread_length; i++) {
......@@ -467,11 +481,6 @@ MethodHandleWalker::walk(TRAPS) {
break;
}
case java_lang_invoke_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code
case java_lang_invoke_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code
lose("unimplemented", CHECK_(empty));
break;
default:
lose("bad adapter conversion", CHECK_(empty));
break;
......
......@@ -33,6 +33,7 @@
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
......@@ -169,6 +170,11 @@ void frame::set_pc(address newpc ) {
}
// type testers
bool frame::is_ricochet_frame() const {
RicochetBlob* rcb = SharedRuntime::ricochet_blob();
return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc));
}
bool frame::is_deoptimized_frame() const {
assert(_deopt_state != unknown, "not answerable");
return _deopt_state == is_deoptimized;
......@@ -341,12 +347,18 @@ frame frame::java_sender() const {
frame frame::real_sender(RegisterMap* map) const {
frame result = sender(map);
while (result.is_runtime_frame()) {
while (result.is_runtime_frame() ||
result.is_ricochet_frame()) {
result = result.sender(map);
}
return result;
}
frame frame::sender_for_ricochet_frame(RegisterMap* map) const {
assert(is_ricochet_frame(), "");
return MethodHandles::ricochet_frame_sender(*this, map);
}
// Note: called by profiler - NOT for current thread
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
// If we don't recognize this frame, walk back up the stack until we do
......@@ -529,6 +541,7 @@ jint frame::interpreter_frame_expression_stack_size() const {
const char* frame::print_name() const {
if (is_native_frame()) return "Native";
if (is_interpreted_frame()) return "Interpreted";
if (is_ricochet_frame()) return "Ricochet";
if (is_compiled_frame()) {
if (is_deoptimized_frame()) return "Deoptimized";
return "Compiled";
......@@ -715,6 +728,8 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
} else if (_cb->is_deoptimization_stub()) {
st->print("v ~DeoptimizationBlob");
} else if (_cb->is_ricochet_stub()) {
st->print("v ~RichochetBlob");
} else if (_cb->is_exception_stub()) {
st->print("v ~ExceptionBlob");
} else if (_cb->is_safepoint_stub()) {
......@@ -978,6 +993,9 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver,
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
assert(_cb != NULL, "sanity check");
if (_cb == SharedRuntime::ricochet_blob()) {
oops_ricochet_do(f, reg_map);
}
if (_cb->oop_maps() != NULL) {
OopMapSet::oops_do(this, reg_map, f);
......@@ -996,6 +1014,11 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const Register
cf->do_code_blob(_cb);
}
void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) {
assert(is_ricochet_frame(), "");
MethodHandles::ricochet_frame_oops_do(*this, f, map);
}
class CompiledArgumentOopFinder: public SignatureInfo {
protected:
OopClosure* _f;
......
......@@ -135,6 +135,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
bool is_interpreted_frame() const;
bool is_java_frame() const;
bool is_entry_frame() const; // Java frame called from C?
bool is_ricochet_frame() const;
bool is_native_frame() const;
bool is_runtime_frame() const;
bool is_compiled_frame() const;
......@@ -175,6 +176,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
// Helper methods for better factored code in frame::sender
frame sender_for_compiled_frame(RegisterMap* map) const;
frame sender_for_entry_frame(RegisterMap* map) const;
frame sender_for_ricochet_frame(RegisterMap* map) const;
frame sender_for_interpreter_frame(RegisterMap* map) const;
frame sender_for_native_frame(RegisterMap* map) const;
......@@ -400,6 +402,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
// Oops-do's
void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
void oops_ricochet_do(OopClosure* f, const RegisterMap* map);
private:
void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
......
......@@ -3708,6 +3708,10 @@ class CommandLineFlags {
diagnostic(bool, OptimizeMethodHandles, true, \
"when constructing method handles, try to improve them") \
\
diagnostic(bool, UseRicochetFrames, true, \
"use ricochet stack frames for method handle combination, " \
"if the platform supports them") \
\
experimental(bool, TrustFinalNonStaticFields, false, \
"trust final non-static declarations for constant folding") \
\
......
......@@ -88,6 +88,8 @@ HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
char*, int, char*, int, char*, int);
RicochetBlob* SharedRuntime::_ricochet_blob = NULL;
// Implementation of SharedRuntime
#ifndef PRODUCT
......@@ -460,6 +462,10 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre
if (Interpreter::contains(return_address)) {
return Interpreter::rethrow_exception_entry();
}
// Ricochet frame unwind code
if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) {
return SharedRuntime::ricochet_blob()->exception_addr();
}
guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
......@@ -1174,6 +1180,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread*
assert(stub_frame.is_runtime_frame(), "sanity check");
frame caller_frame = stub_frame.sender(&reg_map);
assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
assert(!caller_frame.is_ricochet_frame(), "unexpected frame");
#endif /* ASSERT */
methodHandle callee_method;
......@@ -1222,6 +1229,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
if (caller_frame.is_interpreted_frame() ||
caller_frame.is_entry_frame() ||
caller_frame.is_ricochet_frame() ||
is_mh_invoke_via_adapter) {
methodOop callee = thread->callee_target();
guarantee(callee != NULL && callee->is_method(), "bad handshake");
......
......@@ -58,6 +58,8 @@ class SharedRuntime: AllStatic {
static RuntimeStub* _resolve_virtual_call_blob;
static RuntimeStub* _resolve_static_call_blob;
static RicochetBlob* _ricochet_blob;
static SafepointBlob* _polling_page_safepoint_handler_blob;
static SafepointBlob* _polling_page_return_handler_blob;
#ifdef COMPILER2
......@@ -213,6 +215,16 @@ class SharedRuntime: AllStatic {
return _resolve_static_call_blob->entry_point();
}
static RicochetBlob* ricochet_blob() {
#ifdef X86
// Currently only implemented on x86
assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops");
#endif
return _ricochet_blob;
}
static void generate_ricochet_blob();
static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; }
static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; }
......
......@@ -1649,6 +1649,9 @@ int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
if (fr->is_entry_frame()) {
last_entry_frame = fr;
}
if (fr->is_ricochet_frame()) {
fr->oops_ricochet_do(&blk, vf->register_map());
}
}
vf = vf->sender();
}
......