Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
dragonwell8_hotspot
提交
2b587c38
D
dragonwell8_hotspot
项目概览
openanolis
/
dragonwell8_hotspot
通知
2
Star
2
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
dragonwell8_hotspot
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
2b587c38
编写于
5月 06, 2011
作者:
N
never
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
6939861: JVM should handle more conversion operations
Reviewed-by: twisti, jrose
上级
bb8afa46
变更
29
隐藏空白更改
内联
并排
Showing
29 changed file
with
3003 addition
and
637 deletion
+3003
-637
src/cpu/x86/vm/assembler_x86.hpp
src/cpu/x86/vm/assembler_x86.hpp
+27
-1
src/cpu/x86/vm/frame_x86.cpp
src/cpu/x86/vm/frame_x86.cpp
+49
-72
src/cpu/x86/vm/frame_x86.hpp
src/cpu/x86/vm/frame_x86.hpp
+4
-0
src/cpu/x86/vm/frame_x86.inline.hpp
src/cpu/x86/vm/frame_x86.inline.hpp
+1
-0
src/cpu/x86/vm/interpreter_x86.hpp
src/cpu/x86/vm/interpreter_x86.hpp
+3
-1
src/cpu/x86/vm/methodHandles_x86.cpp
src/cpu/x86/vm/methodHandles_x86.cpp
+1500
-294
src/cpu/x86/vm/methodHandles_x86.hpp
src/cpu/x86/vm/methodHandles_x86.hpp
+292
-0
src/cpu/x86/vm/sharedRuntime_x86_32.cpp
src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+27
-0
src/cpu/x86/vm/sharedRuntime_x86_64.cpp
src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+28
-0
src/cpu/x86/vm/stubRoutines_x86_32.hpp
src/cpu/x86/vm/stubRoutines_x86_32.hpp
+1
-1
src/cpu/x86/vm/stubRoutines_x86_64.hpp
src/cpu/x86/vm/stubRoutines_x86_64.hpp
+1
-1
src/share/vm/classfile/javaClasses.cpp
src/share/vm/classfile/javaClasses.cpp
+24
-0
src/share/vm/classfile/javaClasses.hpp
src/share/vm/classfile/javaClasses.hpp
+10
-4
src/share/vm/classfile/systemDictionary.cpp
src/share/vm/classfile/systemDictionary.cpp
+8
-1
src/share/vm/classfile/vmSymbols.hpp
src/share/vm/classfile/vmSymbols.hpp
+2
-0
src/share/vm/code/codeBlob.cpp
src/share/vm/code/codeBlob.cpp
+75
-108
src/share/vm/code/codeBlob.hpp
src/share/vm/code/codeBlob.hpp
+60
-16
src/share/vm/code/codeCache.cpp
src/share/vm/code/codeCache.cpp
+4
-0
src/share/vm/compiler/disassembler.cpp
src/share/vm/compiler/disassembler.cpp
+9
-9
src/share/vm/prims/jvmtiTagMap.cpp
src/share/vm/prims/jvmtiTagMap.cpp
+3
-0
src/share/vm/prims/methodHandleWalk.cpp
src/share/vm/prims/methodHandleWalk.cpp
+17
-8
src/share/vm/prims/methodHandles.cpp
src/share/vm/prims/methodHandles.cpp
+473
-87
src/share/vm/prims/methodHandles.hpp
src/share/vm/prims/methodHandles.hpp
+331
-33
src/share/vm/runtime/frame.cpp
src/share/vm/runtime/frame.cpp
+24
-1
src/share/vm/runtime/frame.hpp
src/share/vm/runtime/frame.hpp
+3
-0
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/globals.hpp
+4
-0
src/share/vm/runtime/sharedRuntime.cpp
src/share/vm/runtime/sharedRuntime.cpp
+8
-0
src/share/vm/runtime/sharedRuntime.hpp
src/share/vm/runtime/sharedRuntime.hpp
+12
-0
src/share/vm/services/heapDumper.cpp
src/share/vm/services/heapDumper.cpp
+3
-0
未找到文件。
src/cpu/x86/vm/assembler_x86.hpp
浏览文件 @
2b587c38
...
...
@@ -234,6 +234,20 @@ class Address VALUE_OBJ_CLASS_SPEC {
a
.
_disp
+=
disp
;
return
a
;
}
Address
plus_disp
(
RegisterOrConstant
disp
,
ScaleFactor
scale
=
times_1
)
const
{
Address
a
=
(
*
this
);
a
.
_disp
+=
disp
.
constant_or_zero
()
*
scale_size
(
scale
);
if
(
disp
.
is_register
())
{
assert
(
!
a
.
index
()
->
is_valid
(),
"competing indexes"
);
a
.
_index
=
disp
.
as_register
();
a
.
_scale
=
scale
;
}
return
a
;
}
bool
is_same_address
(
Address
a
)
const
{
// disregard _rspec
return
_base
==
a
.
_base
&&
_disp
==
a
.
_disp
&&
_index
==
a
.
_index
&&
_scale
==
a
.
_scale
;
}
// The following two overloads are used in connection with the
// ByteSize type (see sizes.hpp). They simplify the use of
...
...
@@ -2029,6 +2043,10 @@ class MacroAssembler: public Assembler {
void
addptr
(
Register
dst
,
Address
src
)
{
LP64_ONLY
(
addq
(
dst
,
src
))
NOT_LP64
(
addl
(
dst
,
src
));
}
void
addptr
(
Register
dst
,
int32_t
src
);
void
addptr
(
Register
dst
,
Register
src
);
void
addptr
(
Register
dst
,
RegisterOrConstant
src
)
{
if
(
src
.
is_constant
())
addptr
(
dst
,
(
int
)
src
.
as_constant
());
else
addptr
(
dst
,
src
.
as_register
());
}
void
andptr
(
Register
dst
,
int32_t
src
);
void
andptr
(
Register
src1
,
Register
src2
)
{
LP64_ONLY
(
andq
(
src1
,
src2
))
NOT_LP64
(
andl
(
src1
,
src2
))
;
}
...
...
@@ -2090,7 +2108,10 @@ class MacroAssembler: public Assembler {
void
subptr
(
Register
dst
,
Address
src
)
{
LP64_ONLY
(
subq
(
dst
,
src
))
NOT_LP64
(
subl
(
dst
,
src
));
}
void
subptr
(
Register
dst
,
int32_t
src
);
void
subptr
(
Register
dst
,
Register
src
);
void
subptr
(
Register
dst
,
RegisterOrConstant
src
)
{
if
(
src
.
is_constant
())
subptr
(
dst
,
(
int
)
src
.
as_constant
());
else
subptr
(
dst
,
src
.
as_register
());
}
void
sbbptr
(
Address
dst
,
int32_t
src
)
{
LP64_ONLY
(
sbbq
(
dst
,
src
))
NOT_LP64
(
sbbl
(
dst
,
src
));
}
void
sbbptr
(
Register
dst
,
int32_t
src
)
{
LP64_ONLY
(
sbbq
(
dst
,
src
))
NOT_LP64
(
sbbl
(
dst
,
src
));
}
...
...
@@ -2288,6 +2309,11 @@ public:
void
movptr
(
Address
dst
,
Register
src
);
void
movptr
(
Register
dst
,
RegisterOrConstant
src
)
{
if
(
src
.
is_constant
())
movptr
(
dst
,
src
.
as_constant
());
else
movptr
(
dst
,
src
.
as_register
());
}
#ifdef _LP64
// Generally the next two are only used for moving NULL
// Although there are situations in initializing the mark word where
...
...
src/cpu/x86/vm/frame_x86.cpp
浏览文件 @
2b587c38
...
...
@@ -339,7 +339,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return
fr
;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
...
...
@@ -361,41 +360,35 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame
frame
::
sender_for_interpreter_frame
(
RegisterMap
*
map
)
const
{
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t
*
sender_sp
=
this
->
sender_sp
();
// This is the sp before any possible extension (adapter/locals).
intptr_t
*
unextended_sp
=
interpreter_frame_sender_sp
();
// Stored FP.
intptr_t
*
saved_fp
=
link
();
address
sender_pc
=
this
->
sender_pc
();
CodeBlob
*
sender_cb
=
CodeCache
::
find_blob_unsafe
(
sender_pc
);
assert
(
sender_cb
,
"sanity"
);
nmethod
*
sender_nm
=
sender_cb
->
as_nmethod_or_null
();
// frame::adjust_unextended_sp
void
frame
::
adjust_unextended_sp
()
{
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
nmethod
*
sender_nm
=
(
_cb
==
NULL
)
?
NULL
:
_cb
->
as_nmethod_or_null
();
if
(
sender_nm
!=
NULL
)
{
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if
(
sender_nm
->
is_deopt_mh_entry
(
sender
_pc
))
{
DEBUG_ONLY
(
verify_deopt_mh_original_pc
(
sender_nm
,
saved
_fp
));
unextended_sp
=
saved
_fp
;
if
(
sender_nm
->
is_deopt_mh_entry
(
_pc
))
{
DEBUG_ONLY
(
verify_deopt_mh_original_pc
(
sender_nm
,
_fp
));
_unextended_sp
=
_fp
;
}
else
if
(
sender_nm
->
is_deopt_entry
(
sender
_pc
))
{
DEBUG_ONLY
(
verify_deopt_original_pc
(
sender_nm
,
unextended_sp
));
else
if
(
sender_nm
->
is_deopt_entry
(
_pc
))
{
DEBUG_ONLY
(
verify_deopt_original_pc
(
sender_nm
,
_
unextended_sp
));
}
else
if
(
sender_nm
->
is_method_handle_return
(
sender
_pc
))
{
unextended_sp
=
saved
_fp
;
else
if
(
sender_nm
->
is_method_handle_return
(
_pc
))
{
_unextended_sp
=
_fp
;
}
}
}
//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
void
frame
::
update_map_with_saved_link
(
RegisterMap
*
map
,
intptr_t
**
link_addr
)
{
// The interpreter and compiler(s) always save EBP/RBP in a known
// location on entry. We must record where that location is
// so this if EBP/RBP was live on callout from c2 we can find
...
...
@@ -404,22 +397,36 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// Since the interpreter always saves EBP/RBP if we record where it is then
// we don't have to always save EBP/RBP on entry and exit to c2 compiled
// code, on entry will be enough.
#ifdef COMPILER2
if
(
map
->
update_map
())
{
map
->
set_location
(
rbp
->
as_VMReg
(),
(
address
)
addr_at
(
link_offset
));
map
->
set_location
(
rbp
->
as_VMReg
(),
(
address
)
link_addr
);
#ifdef AMD64
// this is weird "H" ought to be at a higher address however the
// oopMaps seems to have the "H" regs at the same address and the
// vanilla register.
// XXXX make this go away
if
(
true
)
{
map
->
set_location
(
rbp
->
as_VMReg
()
->
next
(),
(
address
)
addr_at
(
link_offset
)
);
}
// this is weird "H" ought to be at a higher address however the
// oopMaps seems to have the "H" regs at the same address and the
// vanilla register.
// XXXX make this go away
if
(
true
)
{
map
->
set_location
(
rbp
->
as_VMReg
()
->
next
(),
(
address
)
link_addr
);
}
#endif // AMD64
}
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame
frame
::
sender_for_interpreter_frame
(
RegisterMap
*
map
)
const
{
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t
*
sender_sp
=
this
->
sender_sp
();
// This is the sp before any possible extension (adapter/locals).
intptr_t
*
unextended_sp
=
interpreter_frame_sender_sp
();
#ifdef COMPILER2
if
(
map
->
update_map
())
{
update_map_with_saved_link
(
map
,
(
intptr_t
**
)
addr_at
(
link_offset
));
}
#endif // COMPILER2
return
frame
(
sender_sp
,
unextended_sp
,
saved_fp
,
sender_pc
);
return
frame
(
sender_sp
,
unextended_sp
,
link
(),
sender_pc
()
);
}
...
...
@@ -427,6 +434,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// frame::sender_for_compiled_frame
frame
frame
::
sender_for_compiled_frame
(
RegisterMap
*
map
)
const
{
assert
(
map
!=
NULL
,
"map must be set"
);
assert
(
!
is_ricochet_frame
(),
"caller must handle this"
);
// frame owned by optimizing compiler
assert
(
_cb
->
frame_size
()
>=
0
,
"must have non-zero frame size"
);
...
...
@@ -438,31 +446,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// This is the saved value of EBP which may or may not really be an FP.
// It is only an FP if the sender is an interpreter frame (or C1?).
intptr_t
*
saved_fp
=
(
intptr_t
*
)
*
(
sender_sp
-
frame
::
sender_sp_offset
);
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
CodeBlob
*
sender_cb
=
CodeCache
::
find_blob_unsafe
(
sender_pc
);
assert
(
sender_cb
,
"sanity"
);
nmethod
*
sender_nm
=
sender_cb
->
as_nmethod_or_null
();
if
(
sender_nm
!=
NULL
)
{
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if
(
sender_nm
->
is_deopt_mh_entry
(
sender_pc
))
{
DEBUG_ONLY
(
verify_deopt_mh_original_pc
(
sender_nm
,
saved_fp
));
unextended_sp
=
saved_fp
;
}
else
if
(
sender_nm
->
is_deopt_entry
(
sender_pc
))
{
DEBUG_ONLY
(
verify_deopt_original_pc
(
sender_nm
,
unextended_sp
));
}
else
if
(
sender_nm
->
is_method_handle_return
(
sender_pc
))
{
unextended_sp
=
saved_fp
;
}
}
intptr_t
**
saved_fp_addr
=
(
intptr_t
**
)
(
sender_sp
-
frame
::
sender_sp_offset
);
if
(
map
->
update_map
())
{
// Tell GC to use argument oopmaps for some runtime stubs that need it.
...
...
@@ -472,23 +456,15 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
if
(
_cb
->
oop_maps
()
!=
NULL
)
{
OopMapSet
::
update_register_map
(
this
,
map
);
}
// Since the prolog does the save and restore of EBP there is no oopmap
// for it so we must fill in its location as if there was an oopmap entry
// since if our caller was compiled code there could be live jvm state in it.
map
->
set_location
(
rbp
->
as_VMReg
(),
(
address
)
(
sender_sp
-
frame
::
sender_sp_offset
));
#ifdef AMD64
// this is weird "H" ought to be at a higher address however the
// oopMaps seems to have the "H" regs at the same address and the
// vanilla register.
// XXXX make this go away
if
(
true
)
{
map
->
set_location
(
rbp
->
as_VMReg
()
->
next
(),
(
address
)
(
sender_sp
-
frame
::
sender_sp_offset
));
}
#endif // AMD64
update_map_with_saved_link
(
map
,
saved_fp_addr
);
}
assert
(
sender_sp
!=
sp
(),
"must have changed"
);
return
frame
(
sender_sp
,
unextended_sp
,
saved_fp
,
sender_pc
);
return
frame
(
sender_sp
,
unextended_sp
,
*
saved_fp_addr
,
sender_pc
);
}
...
...
@@ -502,6 +478,7 @@ frame frame::sender(RegisterMap* map) const {
if
(
is_entry_frame
())
return
sender_for_entry_frame
(
map
);
if
(
is_interpreted_frame
())
return
sender_for_interpreter_frame
(
map
);
assert
(
_cb
==
CodeCache
::
find_blob
(
pc
()),
"Must be the same"
);
if
(
is_ricochet_frame
())
return
sender_for_ricochet_frame
(
map
);
if
(
_cb
!=
NULL
)
{
return
sender_for_compiled_frame
(
map
);
...
...
src/cpu/x86/vm/frame_x86.hpp
浏览文件 @
2b587c38
...
...
@@ -164,6 +164,7 @@
// original sp we use that convention.
intptr_t
*
_unextended_sp
;
void
adjust_unextended_sp
();
intptr_t
*
ptr_at_addr
(
int
offset
)
const
{
return
(
intptr_t
*
)
addr_at
(
offset
);
...
...
@@ -197,6 +198,9 @@
// expression stack tos if we are nested in a java call
intptr_t
*
interpreter_frame_last_sp
()
const
;
// helper to update a map with callee-saved RBP
static
void
update_map_with_saved_link
(
RegisterMap
*
map
,
intptr_t
**
link_addr
);
#ifndef CC_INTERP
// deoptimization support
void
interpreter_frame_set_last_sp
(
intptr_t
*
sp
);
...
...
src/cpu/x86/vm/frame_x86.inline.hpp
浏览文件 @
2b587c38
...
...
@@ -62,6 +62,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_pc
=
pc
;
assert
(
pc
!=
NULL
,
"no pc?"
);
_cb
=
CodeCache
::
find_blob
(
pc
);
adjust_unextended_sp
();
address
original_pc
=
nmethod
::
get_deopt_original_pc
(
this
);
if
(
original_pc
!=
NULL
)
{
...
...
src/cpu/x86/vm/interpreter_x86.hpp
浏览文件 @
2b587c38
...
...
@@ -26,7 +26,9 @@
#define CPU_X86_VM_INTERPRETER_X86_HPP
public
:
static
Address
::
ScaleFactor
stackElementScale
()
{
return
Address
::
times_4
;
}
static
Address
::
ScaleFactor
stackElementScale
()
{
return
NOT_LP64
(
Address
::
times_4
)
LP64_ONLY
(
Address
::
times_8
);
}
// Offset from rsp (which points to the last stack element)
static
int
expr_offset_in_bytes
(
int
i
)
{
return
stackElementSize
*
i
;
}
...
...
src/cpu/x86/vm/methodHandles_x86.cpp
浏览文件 @
2b587c38
...
...
@@ -69,23 +69,475 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
return
me
;
}
// stack walking support
frame
MethodHandles
::
ricochet_frame_sender
(
const
frame
&
fr
,
RegisterMap
*
map
)
{
RicochetFrame
*
f
=
RicochetFrame
::
from_frame
(
fr
);
if
(
map
->
update_map
())
frame
::
update_map_with_saved_link
(
map
,
&
f
->
_sender_link
);
return
frame
(
f
->
extended_sender_sp
(),
f
->
exact_sender_sp
(),
f
->
sender_link
(),
f
->
sender_pc
());
}
void
MethodHandles
::
ricochet_frame_oops_do
(
const
frame
&
fr
,
OopClosure
*
blk
,
const
RegisterMap
*
reg_map
)
{
RicochetFrame
*
f
=
RicochetFrame
::
from_frame
(
fr
);
// pick up the argument type descriptor:
Thread
*
thread
=
Thread
::
current
();
Handle
cookie
(
thread
,
f
->
compute_saved_args_layout
(
true
,
true
));
// process fixed part
blk
->
do_oop
((
oop
*
)
f
->
saved_target_addr
());
blk
->
do_oop
((
oop
*
)
f
->
saved_args_layout_addr
());
// process variable arguments:
if
(
cookie
.
is_null
())
return
;
// no arguments to describe
// the cookie is actually the invokeExact method for my target
// his argument signature is what I'm interested in
assert
(
cookie
->
is_method
(),
""
);
methodHandle
invoker
(
thread
,
methodOop
(
cookie
()));
assert
(
invoker
->
name
()
==
vmSymbols
::
invokeExact_name
(),
"must be this kind of method"
);
assert
(
!
invoker
->
is_static
(),
"must have MH argument"
);
int
slot_count
=
invoker
->
size_of_parameters
();
assert
(
slot_count
>=
1
,
"must include 'this'"
);
intptr_t
*
base
=
f
->
saved_args_base
();
intptr_t
*
retval
=
NULL
;
if
(
f
->
has_return_value_slot
())
retval
=
f
->
return_value_slot_addr
();
int
slot_num
=
slot_count
;
intptr_t
*
loc
=
&
base
[
slot_num
-=
1
];
//blk->do_oop((oop*) loc); // original target, which is irrelevant
int
arg_num
=
0
;
for
(
SignatureStream
ss
(
invoker
->
signature
());
!
ss
.
is_done
();
ss
.
next
())
{
if
(
ss
.
at_return_type
())
continue
;
BasicType
ptype
=
ss
.
type
();
if
(
ptype
==
T_ARRAY
)
ptype
=
T_OBJECT
;
// fold all refs to T_OBJECT
assert
(
ptype
>=
T_BOOLEAN
&&
ptype
<=
T_OBJECT
,
"not array or void"
);
loc
=
&
base
[
slot_num
-=
type2size
[
ptype
]];
bool
is_oop
=
(
ptype
==
T_OBJECT
&&
loc
!=
retval
);
if
(
is_oop
)
blk
->
do_oop
((
oop
*
)
loc
);
arg_num
+=
1
;
}
assert
(
slot_num
==
0
,
"must have processed all the arguments"
);
}
oop
MethodHandles
::
RicochetFrame
::
compute_saved_args_layout
(
bool
read_cache
,
bool
write_cache
)
{
oop
cookie
=
NULL
;
if
(
read_cache
)
{
cookie
=
saved_args_layout
();
if
(
cookie
!=
NULL
)
return
cookie
;
}
oop
target
=
saved_target
();
oop
mtype
=
java_lang_invoke_MethodHandle
::
type
(
target
);
oop
mtform
=
java_lang_invoke_MethodType
::
form
(
mtype
);
cookie
=
java_lang_invoke_MethodTypeForm
::
vmlayout
(
mtform
);
if
(
write_cache
)
{
(
*
saved_args_layout_addr
())
=
cookie
;
}
return
cookie
;
}
void
MethodHandles
::
RicochetFrame
::
generate_ricochet_blob
(
MacroAssembler
*
_masm
,
// output params:
int
*
frame_size_in_words
,
int
*
bounce_offset
,
int
*
exception_offset
)
{
(
*
frame_size_in_words
)
=
RicochetFrame
::
frame_size_in_bytes
()
/
wordSize
;
address
start
=
__
pc
();
#ifdef ASSERT
__
hlt
();
__
hlt
();
__
hlt
();
// here's a hint of something special:
__
push
(
MAGIC_NUMBER_1
);
__
push
(
MAGIC_NUMBER_2
);
#endif //ASSERT
__
hlt
();
// not reached
// A return PC has just been popped from the stack.
// Return values are in registers.
// The ebp points into the RicochetFrame, which contains
// a cleanup continuation we must return to.
(
*
bounce_offset
)
=
__
pc
()
-
start
;
BLOCK_COMMENT
(
"ricochet_blob.bounce"
);
if
(
VerifyMethodHandles
)
RicochetFrame
::
verify_clean
(
_masm
);
trace_method_handle
(
_masm
,
"ricochet_blob.bounce"
);
__
jmp
(
frame_address
(
continuation_offset_in_bytes
()));
__
hlt
();
DEBUG_ONLY
(
__
push
(
MAGIC_NUMBER_2
));
(
*
exception_offset
)
=
__
pc
()
-
start
;
BLOCK_COMMENT
(
"ricochet_blob.exception"
);
// compare this to Interpreter::rethrow_exception_entry, which is parallel code
// for example, see TemplateInterpreterGenerator::generate_throw_exception
// Live registers in:
// rax: exception
// rdx: return address/pc that threw exception (ignored, always equal to bounce addr)
__
verify_oop
(
rax
);
// no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed
// Take down the frame.
// Cf. InterpreterMacroAssembler::remove_activation.
leave_ricochet_frame
(
_masm
,
/*rcx_recv=*/
noreg
,
saved_last_sp_register
(),
/*sender_pc_reg=*/
rdx
);
// In between activations - previous activation type unknown yet
// compute continuation point - the continuation point expects the
// following registers set up:
//
// rax: exception
// rdx: return address/pc that threw exception
// rsp: expression stack of caller
// rbp: ebp of caller
__
push
(
rax
);
// save exception
__
push
(
rdx
);
// save return address
Register
thread_reg
=
LP64_ONLY
(
r15_thread
)
NOT_LP64
(
rdi
);
NOT_LP64
(
__
get_thread
(
thread_reg
));
__
call_VM_leaf
(
CAST_FROM_FN_PTR
(
address
,
SharedRuntime
::
exception_handler_for_return_address
),
thread_reg
,
rdx
);
__
mov
(
rbx
,
rax
);
// save exception handler
__
pop
(
rdx
);
// restore return address
__
pop
(
rax
);
// restore exception
__
jmp
(
rbx
);
// jump to exception
// handler of caller
}
void
MethodHandles
::
RicochetFrame
::
enter_ricochet_frame
(
MacroAssembler
*
_masm
,
Register
rcx_recv
,
Register
rax_argv
,
address
return_handler
,
Register
rbx_temp
)
{
const
Register
saved_last_sp
=
saved_last_sp_register
();
Address
rcx_mh_vmtarget
(
rcx_recv
,
java_lang_invoke_MethodHandle
::
vmtarget_offset_in_bytes
()
);
Address
rcx_amh_conversion
(
rcx_recv
,
java_lang_invoke_AdapterMethodHandle
::
conversion_offset_in_bytes
()
);
// Push the RicochetFrame a word at a time.
// This creates something similar to an interpreter frame.
// Cf. TemplateInterpreterGenerator::generate_fixed_frame.
BLOCK_COMMENT
(
"push RicochetFrame {"
);
DEBUG_ONLY
(
int
rfo
=
(
int
)
sizeof
(
RicochetFrame
));
assert
((
rfo
-=
wordSize
)
==
RicochetFrame
::
sender_pc_offset_in_bytes
(),
""
);
#define RF_FIELD(push_value, name) \
{ push_value; \
assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); }
RF_FIELD
(
__
push
(
rbp
),
sender_link
);
RF_FIELD
(
__
push
(
saved_last_sp
),
exact_sender_sp
);
// rsi/r13
RF_FIELD
(
__
pushptr
(
rcx_amh_conversion
),
conversion
);
RF_FIELD
(
__
push
(
rax_argv
),
saved_args_base
);
// can be updated if args are shifted
RF_FIELD
(
__
push
((
int32_t
)
NULL_WORD
),
saved_args_layout
);
// cache for GC layout cookie
if
(
UseCompressedOops
)
{
__
load_heap_oop
(
rbx_temp
,
rcx_mh_vmtarget
);
RF_FIELD
(
__
push
(
rbx_temp
),
saved_target
);
}
else
{
RF_FIELD
(
__
pushptr
(
rcx_mh_vmtarget
),
saved_target
);
}
__
lea
(
rbx_temp
,
ExternalAddress
(
return_handler
));
RF_FIELD
(
__
push
(
rbx_temp
),
continuation
);
#undef RF_FIELD
assert
(
rfo
==
0
,
"fully initialized the RicochetFrame"
);
// compute new frame pointer:
__
lea
(
rbp
,
Address
(
rsp
,
RicochetFrame
::
sender_link_offset_in_bytes
()));
// Push guard word #1 in debug mode.
DEBUG_ONLY
(
__
push
((
int32_t
)
RicochetFrame
::
MAGIC_NUMBER_1
));
// For debugging, leave behind an indication of which stub built this frame.
DEBUG_ONLY
({
Label
L
;
__
call
(
L
,
relocInfo
::
none
);
__
bind
(
L
);
});
BLOCK_COMMENT
(
"} RicochetFrame"
);
}
void
MethodHandles
::
RicochetFrame
::
leave_ricochet_frame
(
MacroAssembler
*
_masm
,
Register
rcx_recv
,
Register
new_sp_reg
,
Register
sender_pc_reg
)
{
assert_different_registers
(
rcx_recv
,
new_sp_reg
,
sender_pc_reg
);
const
Register
saved_last_sp
=
saved_last_sp_register
();
// Take down the frame.
// Cf. InterpreterMacroAssembler::remove_activation.
BLOCK_COMMENT
(
"end_ricochet_frame {"
);
// TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down.
// This will keep stack in bounds even with unlimited tailcalls, each with an adapter.
if
(
rcx_recv
->
is_valid
())
__
movptr
(
rcx_recv
,
RicochetFrame
::
frame_address
(
RicochetFrame
::
saved_target_offset_in_bytes
()));
__
movptr
(
sender_pc_reg
,
RicochetFrame
::
frame_address
(
RicochetFrame
::
sender_pc_offset_in_bytes
()));
__
movptr
(
saved_last_sp
,
RicochetFrame
::
frame_address
(
RicochetFrame
::
exact_sender_sp_offset_in_bytes
()));
__
movptr
(
rbp
,
RicochetFrame
::
frame_address
(
RicochetFrame
::
sender_link_offset_in_bytes
()));
__
mov
(
rsp
,
new_sp_reg
);
BLOCK_COMMENT
(
"} end_ricochet_frame"
);
}
// Emit code to verify that RBP is pointing at a valid ricochet frame.
#ifdef ASSERT
enum
{
ARG_LIMIT
=
255
,
SLOP
=
4
,
// use this parameter for checking for garbage stack movements:
UNREASONABLE_STACK_MOVE
=
(
ARG_LIMIT
+
SLOP
)
// the slop defends against false alarms due to fencepost errors
};
void
MethodHandles
::
RicochetFrame
::
verify_clean
(
MacroAssembler
*
_masm
)
{
// The stack should look like this:
// ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
// Check various invariants.
verify_offsets
();
Register
rdi_temp
=
rdi
;
Register
rcx_temp
=
rcx
;
{
__
push
(
rdi_temp
);
__
push
(
rcx_temp
);
}
#define UNPUSH_TEMPS \
{ __ pop(rcx_temp); __ pop(rdi_temp); }
Address
magic_number_1_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
magic_number_1_offset_in_bytes
());
Address
magic_number_2_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
magic_number_2_offset_in_bytes
());
Address
continuation_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
continuation_offset_in_bytes
());
Address
conversion_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
conversion_offset_in_bytes
());
Address
saved_args_base_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
saved_args_base_offset_in_bytes
());
Label
L_bad
,
L_ok
;
BLOCK_COMMENT
(
"verify_clean {"
);
// Magic numbers must check out:
__
cmpptr
(
magic_number_1_addr
,
(
int32_t
)
MAGIC_NUMBER_1
);
__
jcc
(
Assembler
::
notEqual
,
L_bad
);
__
cmpptr
(
magic_number_2_addr
,
(
int32_t
)
MAGIC_NUMBER_2
);
__
jcc
(
Assembler
::
notEqual
,
L_bad
);
// Arguments pointer must look reasonable:
__
movptr
(
rcx_temp
,
saved_args_base_addr
);
__
cmpptr
(
rcx_temp
,
rbp
);
__
jcc
(
Assembler
::
below
,
L_bad
);
__
subptr
(
rcx_temp
,
UNREASONABLE_STACK_MOVE
*
Interpreter
::
stackElementSize
);
__
cmpptr
(
rcx_temp
,
rbp
);
__
jcc
(
Assembler
::
above
,
L_bad
);
load_conversion_dest_type
(
_masm
,
rdi_temp
,
conversion_addr
);
__
cmpl
(
rdi_temp
,
T_VOID
);
__
jcc
(
Assembler
::
equal
,
L_ok
);
__
movptr
(
rcx_temp
,
saved_args_base_addr
);
load_conversion_vminfo
(
_masm
,
rdi_temp
,
conversion_addr
);
__
cmpptr
(
Address
(
rcx_temp
,
rdi_temp
,
Interpreter
::
stackElementScale
()),
(
int32_t
)
RETURN_VALUE_PLACEHOLDER
);
__
jcc
(
Assembler
::
equal
,
L_ok
);
__
BIND
(
L_bad
);
UNPUSH_TEMPS
;
__
stop
(
"damaged ricochet frame"
);
__
BIND
(
L_ok
);
UNPUSH_TEMPS
;
BLOCK_COMMENT
(
"} verify_clean"
);
#undef UNPUSH_TEMPS
}
#endif //ASSERT
void
MethodHandles
::
load_klass_from_Class
(
MacroAssembler
*
_masm
,
Register
klass_reg
)
{
if
(
VerifyMethodHandles
)
verify_klass
(
_masm
,
klass_reg
,
SystemDictionaryHandles
::
Class_klass
(),
"AMH argument is a Class"
);
__
load_heap_oop
(
klass_reg
,
Address
(
klass_reg
,
java_lang_Class
::
klass_offset_in_bytes
()));
}
void
MethodHandles
::
load_conversion_vminfo
(
MacroAssembler
*
_masm
,
Register
reg
,
Address
conversion_field_addr
)
{
int
bits
=
BitsPerByte
;
int
offset
=
(
CONV_VMINFO_SHIFT
/
bits
);
int
shift
=
(
CONV_VMINFO_SHIFT
%
bits
);
__
load_unsigned_byte
(
reg
,
conversion_field_addr
.
plus_disp
(
offset
));
assert
(
CONV_VMINFO_MASK
==
right_n_bits
(
bits
-
shift
),
"else change type of previous load"
);
assert
(
shift
==
0
,
"no shift needed"
);
}
void
MethodHandles
::
load_conversion_dest_type
(
MacroAssembler
*
_masm
,
Register
reg
,
Address
conversion_field_addr
)
{
int
bits
=
BitsPerByte
;
int
offset
=
(
CONV_DEST_TYPE_SHIFT
/
bits
);
int
shift
=
(
CONV_DEST_TYPE_SHIFT
%
bits
);
__
load_unsigned_byte
(
reg
,
conversion_field_addr
.
plus_disp
(
offset
));
assert
(
CONV_TYPE_MASK
==
right_n_bits
(
bits
-
shift
),
"else change type of previous load"
);
__
shrl
(
reg
,
shift
);
DEBUG_ONLY
(
int
conv_type_bits
=
(
int
)
exact_log2
(
CONV_TYPE_MASK
+
1
));
assert
((
shift
+
conv_type_bits
)
==
bits
,
"left justified in byte"
);
}
void
MethodHandles
::
load_stack_move
(
MacroAssembler
*
_masm
,
Register
rdi_stack_move
,
Register
rcx_amh
,
bool
might_be_negative
)
{
BLOCK_COMMENT
(
"load_stack_move"
);
Address
rcx_amh_conversion
(
rcx_amh
,
java_lang_invoke_AdapterMethodHandle
::
conversion_offset_in_bytes
());
__
movl
(
rdi_stack_move
,
rcx_amh_conversion
);
__
sarl
(
rdi_stack_move
,
CONV_STACK_MOVE_SHIFT
);
#ifdef _LP64
if
(
might_be_negative
)
{
// clean high bits of stack motion register (was loaded as an int)
__
movslq
(
rdi_stack_move
,
rdi_stack_move
);
}
#endif //_LP64
if
(
VerifyMethodHandles
)
{
Label
L_ok
,
L_bad
;
int32_t
stack_move_limit
=
0x4000
;
// extra-large
__
cmpptr
(
rdi_stack_move
,
stack_move_limit
);
__
jcc
(
Assembler
::
greaterEqual
,
L_bad
);
__
cmpptr
(
rdi_stack_move
,
-
stack_move_limit
);
__
jcc
(
Assembler
::
greater
,
L_ok
);
__
bind
(
L_bad
);
__
stop
(
"load_stack_move of garbage value"
);
__
BIND
(
L_ok
);
}
}
#ifndef PRODUCT
void
MethodHandles
::
RicochetFrame
::
verify_offsets
()
{
// Check compatibility of this struct with the more generally used offsets of class frame:
int
ebp_off
=
sender_link_offset_in_bytes
();
// offset from struct base to local rbp value
assert
(
ebp_off
+
wordSize
*
frame
::
interpreter_frame_method_offset
==
saved_args_base_offset_in_bytes
(),
""
);
assert
(
ebp_off
+
wordSize
*
frame
::
interpreter_frame_last_sp_offset
==
conversion_offset_in_bytes
(),
""
);
assert
(
ebp_off
+
wordSize
*
frame
::
interpreter_frame_sender_sp_offset
==
exact_sender_sp_offset_in_bytes
(),
""
);
// These last two have to be exact:
assert
(
ebp_off
+
wordSize
*
frame
::
link_offset
==
sender_link_offset_in_bytes
(),
""
);
assert
(
ebp_off
+
wordSize
*
frame
::
return_addr_offset
==
sender_pc_offset_in_bytes
(),
""
);
}
void
MethodHandles
::
RicochetFrame
::
verify
()
const
{
verify_offsets
();
assert
(
magic_number_1
()
==
MAGIC_NUMBER_1
,
""
);
assert
(
magic_number_2
()
==
MAGIC_NUMBER_2
,
""
);
if
(
!
Universe
::
heap
()
->
is_gc_active
())
{
if
(
saved_args_layout
()
!=
NULL
)
{
assert
(
saved_args_layout
()
->
is_method
(),
"must be valid oop"
);
}
if
(
saved_target
()
!=
NULL
)
{
assert
(
java_lang_invoke_MethodHandle
::
is_instance
(
saved_target
()),
"checking frame value"
);
}
}
int
conv_op
=
adapter_conversion_op
(
conversion
());
assert
(
conv_op
==
java_lang_invoke_AdapterMethodHandle
::
OP_COLLECT_ARGS
||
conv_op
==
java_lang_invoke_AdapterMethodHandle
::
OP_FOLD_ARGS
||
conv_op
==
java_lang_invoke_AdapterMethodHandle
::
OP_PRIM_TO_REF
,
"must be a sane conversion"
);
if
(
has_return_value_slot
())
{
assert
(
*
return_value_slot_addr
()
==
RETURN_VALUE_PLACEHOLDER
,
""
);
}
}
#endif //PRODUCT
#ifdef ASSERT
static
void
verify_argslot
(
MacroAssembler
*
_masm
,
Register
argslot_reg
,
const
char
*
error_message
)
{
void
MethodHandles
::
verify_argslot
(
MacroAssembler
*
_masm
,
Register
argslot_reg
,
const
char
*
error_message
)
{
// Verify that argslot lies within (rsp, rbp].
Label
L_ok
,
L_bad
;
BLOCK_COMMENT
(
"
{ verify_argslot
"
);
BLOCK_COMMENT
(
"
verify_argslot {
"
);
__
cmpptr
(
argslot_reg
,
rbp
);
__
jccb
(
Assembler
::
above
,
L_bad
);
__
cmpptr
(
rsp
,
argslot_reg
);
__
jccb
(
Assembler
::
below
,
L_ok
);
__
bind
(
L_bad
);
__
stop
(
error_message
);
__
bind
(
L_ok
);
__
BIND
(
L_ok
);
BLOCK_COMMENT
(
"} verify_argslot"
);
}
#endif
void
MethodHandles
::
verify_argslots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
Register
arg_slot_base_reg
,
bool
negate_argslots
,
const
char
*
error_message
)
{
// Verify that [argslot..argslot+size) lies within (rsp, rbp).
Label
L_ok
,
L_bad
;
Register
rdi_temp
=
rdi
;
BLOCK_COMMENT
(
"verify_argslots {"
);
__
push
(
rdi_temp
);
if
(
negate_argslots
)
{
if
(
arg_slots
.
is_constant
())
{
arg_slots
=
-
1
*
arg_slots
.
as_constant
();
}
else
{
__
movptr
(
rdi_temp
,
arg_slots
);
__
negptr
(
rdi_temp
);
arg_slots
=
rdi_temp
;
}
}
__
lea
(
rdi_temp
,
Address
(
arg_slot_base_reg
,
arg_slots
,
Interpreter
::
stackElementScale
()));
__
cmpptr
(
rdi_temp
,
rbp
);
__
pop
(
rdi_temp
);
__
jcc
(
Assembler
::
above
,
L_bad
);
__
cmpptr
(
rsp
,
arg_slot_base_reg
);
__
jcc
(
Assembler
::
below
,
L_ok
);
__
bind
(
L_bad
);
__
stop
(
error_message
);
__
BIND
(
L_ok
);
BLOCK_COMMENT
(
"} verify_argslots"
);
}
// Make sure that arg_slots has the same sign as the given direction.
// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero.
// direction > 0 means "removing slots" (arg_slots must be positive),
// direction < 0 means "inserting slots" (arg_slots must be negative),
// direction == 0 accepts either sign and additionally allows zero.
// For a constant arg_slots the checks happen at assembly time (asserts);
// for a register they are emitted as runtime checks that stop the VM.
void MethodHandles::verify_stack_move(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
                                      int direction) {
  bool allow_zero = arg_slots.is_constant();
  if (direction == 0) { direction = +1; allow_zero = true; }
  assert(stack_move_unit() == -1, "else add extra checks here");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    BLOCK_COMMENT("verify_stack_move {");
    // testl(arg_slots.as_register(), -stack_move_unit() - 1);  // no need
    // jcc(Assembler::notZero, L_bad);
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    if (direction > 0) {
      // Must be strictly positive (or zero when allow_zero), and bounded.
      __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad);
      __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
      __ jcc(Assembler::less, L_ok);
    } else {
      // Must be strictly negative (or zero when allow_zero), and bounded.
      __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad);
      __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
      __ jcc(Assembler::greater, L_ok);
    }
    __ bind(L_bad);
    if (direction > 0)
      __ stop("assert arg_slots > 0");
    else
      __ stop("assert arg_slots < 0");
    __ BIND(L_ok);
    BLOCK_COMMENT("} verify_stack_move");
  } else {
    // Constant case: validate at assembly time, emit no code.
    intptr_t size = arg_slots.as_constant();
    if (direction < 0)  size = -size;
    assert(size >= 0, "correct direction of constant move");
    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
  }
}
// Emit debug-only code that checks that 'obj' is a non-null oop whose
// klass is 'klass' or a subtype of it (via the super_check_offset slot).
// On failure the generated code stops the VM with error_message.
// 'klass' must be one of the well-known SystemDictionaryHandles entries,
// because its handle address is embedded in the code as an ExternalAddress.
// Clobbers nothing: rdi is saved/restored around its use as a scratch reg.
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, KlassHandle klass,
                                 const char* error_message) {
  oop* klass_addr = klass.raw_value();
  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
         "must be one of the SystemDictionaryHandles");
  Register temp = rdi;
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);        // null object fails the check
  __ push(temp);                         // preserve caller's rdi
  __ load_klass(temp, obj);
  // Fast path: exact klass match against the embedded handle address.
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  // Slow(er) path: one-level supertype check through super_check_offset.
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  __ pop(temp);                          // balance the push before stopping
  __ bind(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  __ pop(temp);                          // restore rdi on the success path
  BLOCK_COMMENT("} verify_klass");
}
#endif //ASSERT
// Code generation
address
MethodHandles
::
generate_method_handle_interpreter_entry
(
MacroAssembler
*
_masm
)
{
...
...
@@ -116,6 +568,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
address
entry_point
=
__
pc
();
// fetch the MethodType from the method handle into rax (the 'check' register)
// FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
// This would simplify several touchy bits of code.
// See 6984712: JSR 292 method handle calls need a clean argument base pointer
{
Register
tem
=
rbx_method
;
for
(
jint
*
pchase
=
methodOopDesc
::
method_type_offsets_chain
();
(
*
pchase
)
!=
-
1
;
pchase
++
)
{
...
...
@@ -128,17 +583,23 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
__
load_heap_oop
(
rdx_temp
,
Address
(
rax_mtype
,
__
delayed_value
(
java_lang_invoke_MethodType
::
form_offset_in_bytes
,
rdi_temp
)));
Register
rdx_vmslots
=
rdx_temp
;
__
movl
(
rdx_vmslots
,
Address
(
rdx_temp
,
__
delayed_value
(
java_lang_invoke_MethodTypeForm
::
vmslots_offset_in_bytes
,
rdi_temp
)));
__
movptr
(
rcx_recv
,
__
argument_address
(
rdx_vmslots
));
Address
mh_receiver_slot_addr
=
__
argument_address
(
rdx_vmslots
);
__
movptr
(
rcx_recv
,
mh_receiver_slot_addr
);
trace_method_handle
(
_masm
,
"invokeExact"
);
__
check_method_handle_type
(
rax_mtype
,
rcx_recv
,
rdi_temp
,
wrong_method_type
);
// Nobody uses the MH receiver slot after this. Make sure.
DEBUG_ONLY
(
__
movptr
(
mh_receiver_slot_addr
,
(
int32_t
)
0x999999
));
__
jump_to_method_handle_entry
(
rcx_recv
,
rdi_temp
);
// for invokeGeneric (only), apply argument and result conversions on the fly
__
bind
(
invoke_generic_slow_path
);
#ifdef ASSERT
{
Label
L
;
if
(
VerifyMethodHandles
)
{
Label
L
;
__
cmpb
(
Address
(
rbx_method
,
methodOopDesc
::
intrinsic_id_offset_in_bytes
()),
(
int
)
vmIntrinsics
::
_invokeGeneric
);
__
jcc
(
Assembler
::
equal
,
L
);
__
stop
(
"bad methodOop::intrinsic_id"
);
...
...
@@ -150,7 +611,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// make room on the stack for another pointer:
Register
rcx_argslot
=
rcx_recv
;
__
lea
(
rcx_argslot
,
__
argument_address
(
rdx_vmslots
,
1
));
insert_arg_slots
(
_masm
,
2
*
stack_move_unit
(),
_INSERT_REF_MASK
,
insert_arg_slots
(
_masm
,
2
*
stack_move_unit
(),
rcx_argslot
,
rbx_temp
,
rdx_temp
);
// load up an adapter from the calling type (Java weaves this)
...
...
@@ -185,40 +646,28 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
return
entry_point
;
}
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
// A literal 0 argument is ambiguous between the Register and the constant
// overloads; wrapping it through this helper forces the constant meaning.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
// arg_slots must be a multiple of stack_move_unit() and < 0
// rax_argslot is decremented to point to the new (shifted) location of the argslot
// But, rdx_temp ends up holding the original value of rax_argslot.
void
MethodHandles
::
insert_arg_slots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
int
arg_mask
,
Register
rax_argslot
,
Register
rbx_temp
,
Register
rdx_temp
,
Register
temp3_reg
)
{
assert
(
temp3_reg
==
noreg
,
"temp3 not required"
);
Register
rbx_temp
,
Register
rdx_temp
)
{
// allow constant zero
if
(
arg_slots
.
is_constant
()
&&
arg_slots
.
as_constant
()
==
0
)
return
;
assert_different_registers
(
rax_argslot
,
rbx_temp
,
rdx_temp
,
(
!
arg_slots
.
is_register
()
?
rsp
:
arg_slots
.
as_register
()));
#ifdef ASSERT
verify_argslot
(
_masm
,
rax_argslot
,
"insertion point must fall within current frame"
);
if
(
arg_slots
.
is_register
())
{
Label
L_ok
,
L_bad
;
__
cmpptr
(
arg_slots
.
as_register
(),
(
int32_t
)
NULL_WORD
);
__
jccb
(
Assembler
::
greater
,
L_bad
);
__
testl
(
arg_slots
.
as_register
(),
-
stack_move_unit
()
-
1
);
__
jccb
(
Assembler
::
zero
,
L_ok
);
__
bind
(
L_bad
);
__
stop
(
"assert arg_slots <= 0 and clear low bits"
);
__
bind
(
L_ok
);
}
else
{
assert
(
arg_slots
.
as_constant
()
<=
0
,
""
);
assert
(
arg_slots
.
as_constant
()
%
-
stack_move_unit
()
==
0
,
""
);
}
#endif //ASSERT
#ifdef _LP64
if
(
arg_slots
.
is_register
())
{
// clean high bits of stack motion register (was loaded as an int)
__
movslq
(
arg_slots
.
as_register
(),
arg_slots
.
as_register
());
}
#endif
if
(
VerifyMethodHandles
)
verify_argslot
(
_masm
,
rax_argslot
,
"insertion point must fall within current frame"
);
if
(
VerifyMethodHandles
)
verify_stack_move
(
_masm
,
arg_slots
,
-
1
);
// Make space on the stack for the inserted argument(s).
// Then pull down everything shallower than rax_argslot.
...
...
@@ -230,59 +679,39 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
// argslot -= size;
BLOCK_COMMENT
(
"insert_arg_slots {"
);
__
mov
(
rdx_temp
,
rsp
);
// source pointer for copy
__
lea
(
rsp
,
Address
(
rsp
,
arg_slots
,
Address
::
times_ptr
));
__
lea
(
rsp
,
Address
(
rsp
,
arg_slots
,
Interpreter
::
stackElementScale
()
));
{
Label
loop
;
__
BIND
(
loop
);
// pull one word down each time through the loop
__
movptr
(
rbx_temp
,
Address
(
rdx_temp
,
0
));
__
movptr
(
Address
(
rdx_temp
,
arg_slots
,
Address
::
times_ptr
),
rbx_temp
);
__
movptr
(
Address
(
rdx_temp
,
arg_slots
,
Interpreter
::
stackElementScale
()
),
rbx_temp
);
__
addptr
(
rdx_temp
,
wordSize
);
__
cmpptr
(
rdx_temp
,
rax_argslot
);
__
jcc
b
(
Assembler
::
less
,
loop
);
__
jcc
(
Assembler
::
less
,
loop
);
}
// Now move the argslot down, to point to the opened-up space.
__
lea
(
rax_argslot
,
Address
(
rax_argslot
,
arg_slots
,
Address
::
times_ptr
));
__
lea
(
rax_argslot
,
Address
(
rax_argslot
,
arg_slots
,
Interpreter
::
stackElementScale
()
));
BLOCK_COMMENT
(
"} insert_arg_slots"
);
}
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >
=
0
// arg_slots must be a multiple of stack_move_unit() and > 0
void
MethodHandles
::
remove_arg_slots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
Register
rax_argslot
,
Register
rbx_temp
,
Register
rdx_temp
,
Register
temp3_reg
)
{
assert
(
temp3_reg
==
noreg
,
"temp3 not required"
);
RegisterOrConstant
arg_slots
,
Register
rax_argslot
,
Register
rbx_temp
,
Register
rdx_temp
)
{
// allow constant zero
if
(
arg_slots
.
is_constant
()
&&
arg_slots
.
as_constant
()
==
0
)
return
;
assert_different_registers
(
rax_argslot
,
rbx_temp
,
rdx_temp
,
(
!
arg_slots
.
is_register
()
?
rsp
:
arg_slots
.
as_register
()));
#ifdef ASSERT
// Verify that [argslot..argslot+size) lies within (rsp, rbp).
__
lea
(
rbx_temp
,
Address
(
rax_argslot
,
arg_slots
,
Address
::
times_ptr
));
verify_argslot
(
_masm
,
rbx_temp
,
"deleted argument(s) must fall within current frame"
);
if
(
arg_slots
.
is_register
())
{
Label
L_ok
,
L_bad
;
__
cmpptr
(
arg_slots
.
as_register
(),
(
int32_t
)
NULL_WORD
);
__
jccb
(
Assembler
::
less
,
L_bad
);
__
testl
(
arg_slots
.
as_register
(),
-
stack_move_unit
()
-
1
);
__
jccb
(
Assembler
::
zero
,
L_ok
);
__
bind
(
L_bad
);
__
stop
(
"assert arg_slots >= 0 and clear low bits"
);
__
bind
(
L_ok
);
}
else
{
assert
(
arg_slots
.
as_constant
()
>=
0
,
""
);
assert
(
arg_slots
.
as_constant
()
%
-
stack_move_unit
()
==
0
,
""
);
}
#endif //ASSERT
#ifdef _LP64
if
(
false
)
{
// not needed, since register is positive
// clean high bits of stack motion register (was loaded as an int)
if
(
arg_slots
.
is_register
())
__
movslq
(
arg_slots
.
as_register
(),
arg_slots
.
as_register
());
}
#endif
if
(
VerifyMethodHandles
)
verify_argslots
(
_masm
,
arg_slots
,
rax_argslot
,
false
,
"deleted argument(s) must fall within current frame"
);
if
(
VerifyMethodHandles
)
verify_stack_move
(
_masm
,
arg_slots
,
+
1
);
BLOCK_COMMENT
(
"remove_arg_slots {"
);
// Pull up everything shallower than rax_argslot.
...
...
@@ -299,54 +728,332 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
__
BIND
(
loop
);
// pull one word up each time through the loop
__
movptr
(
rbx_temp
,
Address
(
rdx_temp
,
0
));
__
movptr
(
Address
(
rdx_temp
,
arg_slots
,
Address
::
times_ptr
),
rbx_temp
);
__
movptr
(
Address
(
rdx_temp
,
arg_slots
,
Interpreter
::
stackElementScale
()
),
rbx_temp
);
__
addptr
(
rdx_temp
,
-
wordSize
);
__
cmpptr
(
rdx_temp
,
rsp
);
__
jcc
b
(
Assembler
::
greaterEqual
,
loop
);
__
jcc
(
Assembler
::
greaterEqual
,
loop
);
}
// Now move the argslot up, to point to the just-copied block.
__
lea
(
rsp
,
Address
(
rsp
,
arg_slots
,
Address
::
times_ptr
));
__
lea
(
rsp
,
Address
(
rsp
,
arg_slots
,
Interpreter
::
stackElementScale
()
));
// And adjust the argslot address to point at the deletion point.
__
lea
(
rax_argslot
,
Address
(
rax_argslot
,
arg_slots
,
Address
::
times_ptr
));
__
lea
(
rax_argslot
,
Address
(
rax_argslot
,
arg_slots
,
Interpreter
::
stackElementScale
()
));
BLOCK_COMMENT
(
"} remove_arg_slots"
);
}
// Helper to copy argument slots to the top of the stack.
// The sequence starts with rax_argslot and is counted by slot_count
// slot_count must be a multiple of stack_move_unit() and >= 0
// This function blows the temps but does not change rax_argslot.
// skip_words_count (0 or 1) is the number of words already on top of
// the stack that must stay on top after the copied slots are pushed.
void MethodHandles::push_arg_slots(MacroAssembler* _masm,
                                   Register rax_argslot,
                                   RegisterOrConstant slot_count,
                                   int skip_words_count,
                                   Register rbx_temp, Register rdx_temp) {
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!slot_count.is_register() ? rbp : slot_count.as_register()),
                             rsp);
  assert(Interpreter::stackElementSize == wordSize, "else change this code");

  if (VerifyMethodHandles)
    verify_stack_move(_masm, slot_count, 0);

  // allow constant zero
  if (slot_count.is_constant() && slot_count.as_constant() == 0)
    return;

  BLOCK_COMMENT("push_arg_slots {");

  Register rbx_top = rbx_temp;

  // There is at most 1 word to carry down with the TOS.
  switch (skip_words_count) {
  case 1: __ pop(rdx_temp); break;   // hold the TOS word in rdx during the copy
  case 0:                   break;
  default: ShouldNotReachHere();
  }

  if (slot_count.is_constant()) {
    // Constant count: fully unroll the copy as a push sequence,
    // from the deepest slot to the shallowest.
    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
      __ pushptr(Address(rax_argslot, i * wordSize));
    }
  } else {
    Label L_plural, L_loop, L_break;
    // Emit code to dynamically check for the common cases, zero and one slot.
    __ cmpl(slot_count.as_register(), (int32_t) 1);
    __ jccb(Assembler::greater, L_plural);   // 2 or more slots: loop below
    __ jccb(Assembler::less, L_break);       // zero slots: nothing to push
    __ pushptr(Address(rax_argslot, 0));     // exactly one slot
    __ jmpb(L_break);
    __ BIND(L_plural);

    // Loop for 2 or more:
    //   rbx = &rax[slot_count]
    //   while (rbx > rax)  *(--rsp) = *(--rbx)
    __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr));
    __ BIND(L_loop);
    __ subptr(rbx_top, wordSize);
    __ pushptr(Address(rbx_top, 0));
    __ cmpptr(rbx_top, rax_argslot);
    __ jcc(Assembler::above, L_loop);
    __ bind(L_break);
  }
  // Restore the carried TOS word, if any.
  switch (skip_words_count) {
  case 1: __ push(rdx_temp); break;
  case 0:                    break;
  default: ShouldNotReachHere();
  }
  BLOCK_COMMENT("} push_arg_slots");
}
// in-place movement; no change to rsp
// blows rax_temp, rdx_temp
// Copies the slot range [rbx_bottom, top_addr) upward in memory by
// positive_distance_in_slots, iterating from the top down so that an
// overlapping destination is handled correctly.
void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
                                      Register rbx_bottom,  // invariant
                                      Address  top_addr,    // can use rax_temp
                                      RegisterOrConstant positive_distance_in_slots,
                                      Register rax_temp, Register rdx_temp) {
  BLOCK_COMMENT("move_arg_slots_up {");
  assert_different_registers(rbx_bottom,
                             rax_temp, rdx_temp,
                             positive_distance_in_slots.register_or_noreg());
  Label L_loop, L_break;
  Register rax_top = rax_temp;
  // Materialize the top address unless it already is Address(rax_top, 0).
  if (!top_addr.is_same_address(Address(rax_top, 0)))
    __ lea(rax_top, top_addr);
  // Detect empty (or broken) loop:
#ifdef ASSERT
  if (VerifyMethodHandles) {
    // Verify that &bottom < &top (non-empty interval)
    Label L_ok, L_bad;
    if (positive_distance_in_slots.is_register()) {
      __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0);
      __ jcc(Assembler::lessEqual, L_bad);   // distance must be strictly positive
    }
    __ cmpptr(rbx_bottom, rax_top);
    __ jcc(Assembler::below, L_ok);
    __ bind(L_bad);
    __ stop("valid bounds (copy up)");
    __ BIND(L_ok);
  }
#endif
  __ cmpptr(rbx_bottom, rax_top);
  __ jccb(Assembler::aboveEqual, L_break);   // empty interval: nothing to copy
  // work rax down to rbx, copying contiguous data upwards
  // In pseudo-code:
  //   [rbx, rax) = &[bottom, top)
  //   while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--;
  __ BIND(L_loop);
  __ subptr(rax_top, wordSize);
  __ movptr(rdx_temp, Address(rax_top, 0));
  __ movptr(Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp);
  __ cmpptr(rax_top, rbx_bottom);
  __ jcc(Assembler::above, L_loop);
  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  __ bind(L_break);
  BLOCK_COMMENT("} move_arg_slots_up");
}
// in-place movement; no change to rsp
// blows rax_temp, rdx_temp
// Copies the slot range [bottom_addr, rbx_top) downward in memory by
// negative_distance_in_slots (which must be negative), iterating from
// the bottom up so that an overlapping destination is handled correctly.
void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
                                        Address  bottom_addr,  // can use rax_temp
                                        Register rbx_top,      // invariant
                                        RegisterOrConstant negative_distance_in_slots,
                                        Register rax_temp, Register rdx_temp) {
  BLOCK_COMMENT("move_arg_slots_down {");
  assert_different_registers(rbx_top,
                             negative_distance_in_slots.register_or_noreg(),
                             rax_temp, rdx_temp);
  Label L_loop, L_break;
  Register rax_bottom = rax_temp;
  // Materialize the bottom address unless it already is Address(rax_bottom, 0).
  if (!bottom_addr.is_same_address(Address(rax_bottom, 0)))
    __ lea(rax_bottom, bottom_addr);
  // Detect empty (or broken) loop:
#ifdef ASSERT
  // A constant distance must be negative; a register distance is checked below.
  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
  if (VerifyMethodHandles) {
    // Verify that &bottom < &top (non-empty interval)
    Label L_ok, L_bad;
    if (negative_distance_in_slots.is_register()) {
      __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0);
      __ jcc(Assembler::greaterEqual, L_bad);   // distance must be strictly negative
    }
    __ cmpptr(rax_bottom, rbx_top);
    __ jcc(Assembler::below, L_ok);
    __ bind(L_bad);
    __ stop("valid bounds (copy down)");
    __ BIND(L_ok);
  }
#endif
  __ cmpptr(rax_bottom, rbx_top);
  __ jccb(Assembler::aboveEqual, L_break);   // empty interval: nothing to copy
  // work rax up to rbx, copying contiguous data downwards
  // In pseudo-code:
  //   [rax, rbx) = &[bottom, top)
  //   while (rax < rbx) *(rax - distance) = *(rax + 0), rax++;
  __ BIND(L_loop);
  __ movptr(rdx_temp, Address(rax_bottom, 0));
  __ movptr(Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp);
  __ addptr(rax_bottom, wordSize);
  __ cmpptr(rax_bottom, rbx_top);
  __ jcc(Assembler::below, L_loop);
  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  __ bind(L_break);
  BLOCK_COMMENT("} move_arg_slots_down");
}
// Copy from a field or array element to a stacked argument slot.
// is_element (ignored) says whether caller is loading an array element instead of an instance field.
// Oops/arrays are loaded with load_heap_oop (handles compressed oops);
// primitives are loaded with the proper size/signedness and stored into
// a full stack slot.  T_VOID copies nothing.
void MethodHandles::move_typed_arg(MacroAssembler* _masm,
                                   BasicType type, bool is_element,
                                   Address slot_dest, Address value_src,
                                   Register rbx_temp, Register rdx_temp) {
  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
  if (type == T_OBJECT || type == T_ARRAY) {
    __ load_heap_oop(rbx_temp, value_src);
    __ movptr(slot_dest, rbx_temp);
  } else if (type != T_VOID) {
    int  arg_size      = type2aelembytes(type);
    bool arg_is_signed = is_signed_subword_type(type);
    // Subword values are widened to at least a full word in the slot.
    int  slot_size     = (arg_size > wordSize) ? arg_size : wordSize;
    __ load_sized_value(rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp);
    __ store_sized_value(slot_dest, rdx_temp, slot_size, rbx_temp);
  }
  BLOCK_COMMENT("} move_typed_arg");
}
// Store the value returned by a just-completed call (in rax/rdx, xmm0,
// or the x87 FPU stack depending on 'type' and platform) into the given
// interpreter stack slot.  T_VOID stores nothing.
void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
                                      Address return_slot) {
  BLOCK_COMMENT("move_return_value {");
  // Old versions of the JVM must clean the FPU stack after every return.
#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) {
    // Free ST(1)..ST(7); ST(0) holds the return value and is stored below.
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif //COMPILER2
#endif //!_LP64

  // Look at the type and pull the value out of the corresponding register.
  if (type == T_VOID) {
    // nothing to do
  } else if (type == T_OBJECT) {
    __ movptr(return_slot, rax);
  } else if (type == T_INT || is_subword_type(type)) {
    // write the whole word, even if only 32 bits is significant
    __ movptr(return_slot, rax);
  } else if (type == T_LONG) {
    // store the value by parts
    // Note: We assume longs are continguous (if misaligned) on the interpreter stack.
    __ store_sized_value(return_slot, rax, BytesPerLong, rdx);
  } else if (NOT_LP64((type == T_FLOAT  && UseSSE < 1) ||
                      (type == T_DOUBLE && UseSSE < 2) ||)
             false) {
    // Use old x86 FPU registers:
    // (the NOT_LP64 macro erases this whole condition on 64-bit builds,
    // leaving just 'false', so the branch compiles away)
    if (type == T_FLOAT)  __ fstp_s(return_slot);
    else                  __ fstp_d(return_slot);
  } else if (type == T_FLOAT) {
    __ movflt(return_slot, xmm0);
  } else if (type == T_DOUBLE) {
    __ movdbl(return_slot, xmm0);
  } else {
    ShouldNotReachHere();
  }
  BLOCK_COMMENT("} move_return_value");
}
#ifndef PRODUCT
extern
"C"
void
print_method_handle
(
oop
mh
);
void
trace_method_handle_stub
(
const
char
*
adaptername
,
intptr_t
*
saved_sp
,
oop
mh
,
intptr_t
*
sp
)
{
intptr_t
*
saved_regs
,
intptr_t
*
entry_sp
,
intptr_t
*
saved_sp
,
intptr_t
*
saved_bp
)
{
// called as a leaf from native code: do not block the JVM!
intptr_t
*
entry_sp
=
sp
+
LP64_ONLY
(
16
)
NOT_LP64
(
8
);
tty
->
print_cr
(
"MH %s mh="
INTPTR_FORMAT
" sp="
INTPTR_FORMAT
" saved_sp="
INTPTR_FORMAT
")"
,
adaptername
,
(
intptr_t
)
mh
,
(
intptr_t
)
entry_sp
,
saved_sp
);
intptr_t
*
last_sp
=
(
intptr_t
*
)
saved_bp
[
frame
::
interpreter_frame_last_sp_offset
];
intptr_t
*
base_sp
=
(
intptr_t
*
)
saved_bp
[
frame
::
interpreter_frame_monitor_block_top_offset
];
tty
->
print_cr
(
"MH %s mh="
INTPTR_FORMAT
" sp=("
INTPTR_FORMAT
"+"
INTX_FORMAT
") stack_size="
INTX_FORMAT
" bp="
INTPTR_FORMAT
,
adaptername
,
(
intptr_t
)
mh
,
(
intptr_t
)
entry_sp
,
(
intptr_t
)(
saved_sp
-
entry_sp
),
(
intptr_t
)(
base_sp
-
last_sp
),
(
intptr_t
)
saved_bp
);
if
(
last_sp
!=
saved_sp
&&
last_sp
!=
NULL
)
tty
->
print_cr
(
"*** last_sp="
INTPTR_FORMAT
,
(
intptr_t
)
last_sp
);
if
(
Verbose
)
{
tty
->
print
(
" reg dump: "
);
int
saved_regs_count
=
(
entry_sp
-
1
)
-
saved_regs
;
// 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
int
i
;
for
(
i
=
0
;
i
<=
saved_regs_count
;
i
++
)
{
if
(
i
>
0
&&
i
%
4
==
0
&&
i
!=
saved_regs_count
)
{
tty
->
cr
();
tty
->
print
(
" + dump: "
);
}
tty
->
print
(
" %d: "
INTPTR_FORMAT
,
i
,
saved_regs
[
i
]);
}
tty
->
cr
();
int
stack_dump_count
=
16
;
if
(
stack_dump_count
<
(
int
)(
saved_bp
+
2
-
saved_sp
))
stack_dump_count
=
(
int
)(
saved_bp
+
2
-
saved_sp
);
if
(
stack_dump_count
>
64
)
stack_dump_count
=
48
;
for
(
i
=
0
;
i
<
stack_dump_count
;
i
+=
4
)
{
tty
->
print_cr
(
" dump at SP[%d] "
INTPTR_FORMAT
": "
INTPTR_FORMAT
" "
INTPTR_FORMAT
" "
INTPTR_FORMAT
" "
INTPTR_FORMAT
,
i
,
(
intptr_t
)
&
entry_sp
[
i
+
0
],
entry_sp
[
i
+
0
],
entry_sp
[
i
+
1
],
entry_sp
[
i
+
2
],
entry_sp
[
i
+
3
]);
}
print_method_handle
(
mh
);
}
}
// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;   // adapter entry name for the trace line
  oopDesc* mh;               // the method handle being invoked
  intptr_t* saved_regs;      // base of the pusha-saved register block
  intptr_t* entry_sp;        // stack pointer at stub entry
  intptr_t* saved_sp;        // interpreter's saved sp
  intptr_t* saved_bp;        // caller's frame pointer
};
// Unpacks the stack-allocated argument record built by
// MethodHandles::trace_method_handle and forwards its fields to
// trace_method_handle_stub as ordinary C arguments.
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp,
                           args->saved_sp,
                           args->saved_bp);
}
// Emit tracing code (no-op unless TraceMethodHandles is set) that snapshots
// the registers and stack, builds a MethodHandleStubArguments record on the
// stack, and calls trace_method_handle_stub_wrapper with a pointer to it.
// Preserves all registers via push/pusha ... popa/pop.
// NOTE(review): this diff-view chunk interleaved the old direct-argument
// call sequence (LP64 c_rarg moves / 32-bit register call) with the new
// record-on-the-stack sequence; this is the reconstructed post-change
// version, which uses the record for both 32- and 64-bit.
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ push(rax);
  // Compute the sp as it was at entry, skipping rax plus the pusha block
  // (8 words on 32-bit, 16 on 64-bit => offsets 6 and 14 from here... the
  // lea target is the entry_sp value recorded in the argument struct).
  __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp  __ pusha();
  __ pusha();
  __ mov(rbx, rsp);   // rbx = base of the pusha-saved registers
  __ enter();
  // incoming state:
  // rcx: method handle
  // r13 or rsi: saved sp
  // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
  __ push(rbp);               // saved_bp
  __ push(rsi);               // saved_sp
  __ push(rax);               // entry_sp
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // placeholder, overwritten with adaptername below
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ leave();
  __ popa();
  __ pop(rax);
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
...
...
@@ -358,13 +1065,21 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_CHECK_CAST
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_PRIM_TO_PRIM
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_REF_TO_PRIM
)
//OP_PRIM_TO_REF is below...
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_SWAP_ARGS
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_ROT_ARGS
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_DUP_ARGS
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_DROP_ARGS
)
//|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
//OP_COLLECT_ARGS is below...
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_SPREAD_ARGS
)
|
(
!
UseRicochetFrames
?
0
:
LP64_ONLY
(
FLAG_IS_DEFAULT
(
UseRicochetFrames
)
?
0
:
)
java_lang_invoke_MethodTypeForm
::
vmlayout_offset_in_bytes
()
<=
0
?
0
:
((
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_PRIM_TO_REF
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_COLLECT_ARGS
)
|
(
1
<<
java_lang_invoke_AdapterMethodHandle
::
OP_FOLD_ARGS
)
))
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
//------------------------------------------------------------------------------
...
...
@@ -373,6 +1088,8 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void
MethodHandles
::
generate_method_handle_stub
(
MacroAssembler
*
_masm
,
MethodHandles
::
EntryKind
ek
)
{
MethodHandles
::
EntryKind
ek_orig
=
ek_original_kind
(
ek
);
// Here is the register state during an interpreted call,
// as set up by generate_method_handle_interpreter_entry():
// - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
...
...
@@ -385,10 +1102,11 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
const
Register
rax_argslot
=
rax
;
const
Register
rbx_temp
=
rbx
;
const
Register
rdx_temp
=
rdx
;
const
Register
rdi_temp
=
rdi
;
// This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
// and gen_c2i_adapter (from compiled calls):
const
Register
saved_last_sp
=
LP64_ONLY
(
r13
)
NOT_LP64
(
rsi
);
const
Register
saved_last_sp
=
saved_last_sp_register
(
);
// Argument registers for _raise_exception.
// 32-bit: Pass first two oop/int args in registers ECX and EDX.
...
...
@@ -421,6 +1139,13 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
return
;
}
#ifdef ASSERT
__
push
((
int32_t
)
0xEEEEEEEE
);
__
push
((
int32_t
)
(
intptr_t
)
entry_name
(
ek
));
LP64_ONLY
(
__
push
((
int32_t
)
high
((
intptr_t
)
entry_name
(
ek
))));
__
push
((
int32_t
)
0x33333333
);
#endif //ASSERT
address
interp_entry
=
__
pc
();
trace_method_handle
(
_masm
,
entry_name
(
ek
));
...
...
@@ -536,7 +1261,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__
load_klass
(
rax_klass
,
rcx_recv
);
__
verify_oop
(
rax_klass
);
Register
rdi_temp
=
rdi
;
Register
rbx_method
=
rbx_index
;
// get interface klass
...
...
@@ -572,16 +1296,14 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case
_bound_long_direct_mh
:
{
bool
direct_to_method
=
(
ek
>=
_bound_ref_direct_mh
);
BasicType
arg_type
=
T_ILLEGAL
;
int
arg_mask
=
_INSERT_NO_MASK
;
int
arg_slots
=
-
1
;
get_ek_bound_mh_info
(
ek
,
arg_type
,
arg_mask
,
arg_slots
);
BasicType
arg_type
=
ek_bound_mh_arg_type
(
ek
);
int
arg_slots
=
type2size
[
arg_type
];
// make room for the new argument:
__
movl
(
rax_argslot
,
rcx_bmh_vmargslot
);
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
));
insert_arg_slots
(
_masm
,
arg_slots
*
stack_move_unit
(),
arg_mask
,
rax_argslot
,
rbx_temp
,
rdx_temp
);
insert_arg_slots
(
_masm
,
arg_slots
*
stack_move_unit
(),
rax_argslot
,
rbx_temp
,
rdx_temp
);
// store bound argument into the new stack slot:
__
load_heap_oop
(
rbx_temp
,
rcx_bmh_argument
);
...
...
@@ -589,9 +1311,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__
movptr
(
Address
(
rax_argslot
,
0
),
rbx_temp
);
}
else
{
Address
prim_value_addr
(
rbx_temp
,
java_lang_boxing_object
::
value_offset_in_bytes
(
arg_type
));
const
int
arg_size
=
type2aelembytes
(
arg_type
);
__
load_sized_value
(
rdx_temp
,
prim_value_addr
,
arg_size
,
is_signed_subword_type
(
arg_type
),
rbx_temp
);
__
store_sized_value
(
Address
(
rax_argslot
,
0
),
rdx_temp
,
arg_size
,
rbx_temp
);
move_typed_arg
(
_masm
,
arg_type
,
false
,
Address
(
rax_argslot
,
0
),
prim_value_addr
,
rbx_temp
,
rdx_temp
);
}
if
(
direct_to_method
)
{
...
...
@@ -628,7 +1351,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// What class are we casting to?
__
load_heap_oop
(
rbx_klass
,
rcx_amh_argument
);
// this is a Class object!
__
load_heap_oop
(
rbx_klass
,
Address
(
rbx_klass
,
java_lang_Class
::
klass_offset_in_bytes
())
);
load_klass_from_Class
(
_masm
,
rbx_klass
);
Label
done
;
__
movptr
(
rdx_temp
,
vmarg
);
...
...
@@ -663,6 +1386,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case
_adapter_prim_to_prim
:
case
_adapter_ref_to_prim
:
case
_adapter_prim_to_ref
:
// handled completely by optimized cases
__
stop
(
"init_AdapterMethodHandle should not issue this"
);
break
;
...
...
@@ -714,8 +1438,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Do the requested conversion and store the value.
Register
rbx_vminfo
=
rbx_temp
;
__
movl
(
rbx_vminfo
,
rcx_amh_conversion
);
assert
(
CONV_VMINFO_SHIFT
==
0
,
"preshifted"
);
load_conversion_vminfo
(
_masm
,
rbx_vminfo
,
rcx_amh_conversion
);
// get the new MH:
__
load_heap_oop
(
rcx_recv
,
rcx_mh_vmtarget
);
...
...
@@ -753,7 +1476,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// on a little-endian machine we keep the first slot and add another after
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
,
1
));
insert_arg_slots
(
_masm
,
stack_move_unit
(),
_INSERT_INT_MASK
,
insert_arg_slots
(
_masm
,
stack_move_unit
(),
rax_argslot
,
rbx_temp
,
rdx_temp
);
Address
vmarg1
(
rax_argslot
,
-
Interpreter
::
stackElementSize
);
Address
vmarg2
=
vmarg1
.
plus_disp
(
Interpreter
::
stackElementSize
);
...
...
@@ -805,7 +1528,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__
movl
(
rax_argslot
,
rcx_amh_vmargslot
);
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
,
1
));
if
(
ek
==
_adapter_opt_f2d
)
{
insert_arg_slots
(
_masm
,
stack_move_unit
(),
_INSERT_INT_MASK
,
insert_arg_slots
(
_masm
,
stack_move_unit
(),
rax_argslot
,
rbx_temp
,
rdx_temp
);
}
Address
vmarg
(
rax_argslot
,
-
Interpreter
::
stackElementSize
);
...
...
@@ -840,10 +1563,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
}
break
;
case
_adapter_prim_to_ref
:
__
unimplemented
(
entry_name
(
ek
));
// %%% FIXME: NYI
break
;
case
_adapter_swap_args
:
case
_adapter_rot_args
:
// handled completely by optimized cases
...
...
@@ -857,8 +1576,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case
_adapter_opt_rot_2_up
:
case
_adapter_opt_rot_2_down
:
{
int
swap_
bytes
=
0
,
rotate
=
0
;
get_ek_adapter_opt_swap_rot_info
(
ek
,
swap_bytes
,
rotate
);
int
swap_
slots
=
ek_adapter_opt_swap_slots
(
ek
)
;
int
rotate
=
ek_adapter_opt_swap_mode
(
ek
);
// 'argslot' is the position of the first argument to swap
__
movl
(
rax_argslot
,
rcx_amh_vmargslot
);
...
...
@@ -866,83 +1585,69 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// 'vminfo' is the second
Register
rbx_destslot
=
rbx_temp
;
__
movl
(
rbx_destslot
,
rcx_amh_conversion
);
assert
(
CONV_VMINFO_SHIFT
==
0
,
"preshifted"
);
__
andl
(
rbx_destslot
,
CONV_VMINFO_MASK
);
load_conversion_vminfo
(
_masm
,
rbx_destslot
,
rcx_amh_conversion
);
__
lea
(
rbx_destslot
,
__
argument_address
(
rbx_destslot
));
DEBUG_ONLY
(
verify_argslot
(
_masm
,
rbx_destslot
,
"swap point must fall within current frame"
));
if
(
VerifyMethodHandles
)
verify_argslot
(
_masm
,
rbx_destslot
,
"swap point must fall within current frame"
);
assert
(
Interpreter
::
stackElementSize
==
wordSize
,
"else rethink use of wordSize here"
);
if
(
!
rotate
)
{
for
(
int
i
=
0
;
i
<
swap_bytes
;
i
+=
wordSize
)
{
__
movptr
(
rdx_temp
,
Address
(
rax_argslot
,
i
));
__
push
(
rdx_temp
);
__
movptr
(
rdx_temp
,
Address
(
rbx_destslot
,
i
));
__
movptr
(
Address
(
rax_argslot
,
i
),
rdx_temp
);
__
pop
(
rdx_temp
);
__
movptr
(
Address
(
rbx_destslot
,
i
),
rdx_temp
);
// simple swap
for
(
int
i
=
0
;
i
<
swap_slots
;
i
++
)
{
__
movptr
(
rdi_temp
,
Address
(
rax_argslot
,
i
*
wordSize
));
__
movptr
(
rdx_temp
,
Address
(
rbx_destslot
,
i
*
wordSize
));
__
movptr
(
Address
(
rax_argslot
,
i
*
wordSize
),
rdx_temp
);
__
movptr
(
Address
(
rbx_destslot
,
i
*
wordSize
),
rdi_temp
);
}
}
else
{
// push the first chunk, which is going to get overwritten
for
(
int
i
=
swap_bytes
;
(
i
-=
wordSize
)
>=
0
;
)
{
__
movptr
(
rdx_temp
,
Address
(
rax_argslot
,
i
));
__
push
(
rdx_temp
);
// A rotate is actually pair of moves, with an "odd slot" (or pair)
// changing place with a series of other slots.
// First, push the "odd slot", which is going to get overwritten
for
(
int
i
=
swap_slots
-
1
;
i
>=
0
;
i
--
)
{
// handle one with rdi_temp instead of a push:
if
(
i
==
0
)
__
movptr
(
rdi_temp
,
Address
(
rax_argslot
,
i
*
wordSize
));
else
__
pushptr
(
Address
(
rax_argslot
,
i
*
wordSize
));
}
if
(
rotate
>
0
)
{
// rotate upward
__
subptr
(
rax_argslot
,
swap_bytes
);
#ifdef ASSERT
{
// Verify that argslot > destslot, by at least swap_bytes.
Label
L_ok
;
__
cmpptr
(
rax_argslot
,
rbx_destslot
);
__
jccb
(
Assembler
::
aboveEqual
,
L_ok
);
__
stop
(
"source must be above destination (upward rotation)"
);
__
bind
(
L_ok
);
}
#endif
// Here is rotate > 0:
// (low mem) (high mem)
// | dest: more_slots... | arg: odd_slot :arg+1 |
// =>
// | dest: odd_slot | dest+1: more_slots... :arg+1 |
// work argslot down to destslot, copying contiguous data upwards
// pseudo-code:
// rax = src_addr - swap_bytes
// rbx = dest_addr
// while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
Label
loop
;
__
bind
(
loop
);
__
movptr
(
rdx_temp
,
Address
(
rax_argslot
,
0
));
__
movptr
(
Address
(
rax_argslot
,
swap_bytes
),
rdx_temp
);
__
addptr
(
rax_argslot
,
-
wordSize
);
__
cmpptr
(
rax_argslot
,
rbx_destslot
);
__
jccb
(
Assembler
::
aboveEqual
,
loop
);
move_arg_slots_up
(
_masm
,
rbx_destslot
,
Address
(
rax_argslot
,
0
),
swap_slots
,
rax_argslot
,
rdx_temp
);
}
else
{
__
addptr
(
rax_argslot
,
swap_bytes
);
#ifdef ASSERT
{
// Verify that argslot < destslot, by at least swap_bytes.
Label
L_ok
;
__
cmpptr
(
rax_argslot
,
rbx_destslot
);
__
jccb
(
Assembler
::
belowEqual
,
L_ok
);
__
stop
(
"source must be below destination (downward rotation)"
);
__
bind
(
L_ok
);
}
#endif
// Here is the other direction, rotate < 0:
// (low mem) (high mem)
// | arg: odd_slot | arg+1: more_slots... :dest+1 |
// =>
// | arg: more_slots... | dest: odd_slot :dest+1 |
// work argslot up to destslot, copying contiguous data downwards
// pseudo-code:
// rax = src_addr + swap_bytes
// rbx = dest_addr
// while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
Label
loop
;
__
bind
(
loop
);
__
movptr
(
rdx_temp
,
Address
(
rax_argslot
,
0
));
__
movptr
(
Address
(
rax_argslot
,
-
swap_bytes
),
rdx_temp
);
__
addptr
(
rax_argslot
,
wordSize
);
__
cmpptr
(
rax_argslot
,
rbx_destslot
);
__
jccb
(
Assembler
::
belowEqual
,
loop
);
__
addptr
(
rbx_destslot
,
wordSize
);
move_arg_slots_down
(
_masm
,
Address
(
rax_argslot
,
swap_slots
*
wordSize
),
rbx_destslot
,
-
swap_slots
,
rax_argslot
,
rdx_temp
);
__
subptr
(
rbx_destslot
,
wordSize
);
}
// pop the original first chunk into the destination slot, now free
for
(
int
i
=
0
;
i
<
swap_
bytes
;
i
+=
wordSize
)
{
__
pop
(
rdx
_temp
);
__
movptr
(
Address
(
rbx_destslot
,
i
),
rdx_temp
);
for
(
int
i
=
0
;
i
<
swap_
slots
;
i
++
)
{
if
(
i
==
0
)
__
movptr
(
Address
(
rbx_destslot
,
i
*
wordSize
),
rdi
_temp
);
else
__
popptr
(
Address
(
rbx_destslot
,
i
*
wordSize
)
);
}
}
...
...
@@ -958,53 +1663,22 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
));
// 'stack_move' is negative number of words to duplicate
Register
rdx_stack_move
=
rdx_temp
;
__
movl2ptr
(
rdx_stack_move
,
rcx_amh_conversion
);
__
sarptr
(
rdx_stack_move
,
CONV_STACK_MOVE_SHIFT
);
int
argslot0_num
=
0
;
Address
argslot0
=
__
argument_address
(
RegisterOrConstant
(
argslot0_num
));
assert
(
argslot0
.
base
()
==
rsp
,
""
);
int
pre_arg_size
=
argslot0
.
disp
();
assert
(
pre_arg_size
%
wordSize
==
0
,
""
);
assert
(
pre_arg_size
>
0
,
"must include PC"
);
// remember the old rsp+1 (argslot[0])
Register
rbx_oldarg
=
rbx_temp
;
__
lea
(
rbx_oldarg
,
argslot0
);
// move rsp down to make room for dups
__
lea
(
rsp
,
Address
(
rsp
,
rdx_stack_move
,
Address
::
times_ptr
));
// compute the new rsp+1 (argslot[0])
Register
rdx_newarg
=
rdx_temp
;
__
lea
(
rdx_newarg
,
argslot0
);
__
push
(
rdi
);
// need a temp
// (preceding push must be done after arg addresses are taken!)
// pull down the pre_arg_size data (PC)
for
(
int
i
=
-
pre_arg_size
;
i
<
0
;
i
+=
wordSize
)
{
__
movptr
(
rdi
,
Address
(
rbx_oldarg
,
i
));
__
movptr
(
Address
(
rdx_newarg
,
i
),
rdi
);
Register
rdi_stack_move
=
rdi_temp
;
load_stack_move
(
_masm
,
rdi_stack_move
,
rcx_recv
,
true
);
if
(
VerifyMethodHandles
)
{
verify_argslots
(
_masm
,
rdi_stack_move
,
rax_argslot
,
true
,
"copied argument(s) must fall within current frame"
);
}
// copy from rax_argslot[0...] down to new_rsp[1...]
// pseudo-code:
// rbx = old_rsp+1
// rdx = new_rsp+1
// rax = argslot
// while (rdx < rbx) *rdx++ = *rax++
Label
loop
;
__
bind
(
loop
);
__
movptr
(
rdi
,
Address
(
rax_argslot
,
0
));
__
movptr
(
Address
(
rdx_newarg
,
0
),
rdi
);
__
addptr
(
rax_argslot
,
wordSize
);
__
addptr
(
rdx_newarg
,
wordSize
);
__
cmpptr
(
rdx_newarg
,
rbx_oldarg
);
__
jccb
(
Assembler
::
less
,
loop
);
__
pop
(
rdi
);
// restore temp
// insert location is always the bottom of the argument list:
Address
insert_location
=
__
argument_address
(
constant
(
0
));
int
pre_arg_words
=
insert_location
.
disp
()
/
wordSize
;
// return PC is pushed
assert
(
insert_location
.
base
()
==
rsp
,
""
);
__
negl
(
rdi_stack_move
);
push_arg_slots
(
_masm
,
rax_argslot
,
rdi_stack_move
,
pre_arg_words
,
rbx_temp
,
rdx_temp
);
__
load_heap_oop
(
rcx_recv
,
rcx_mh_vmtarget
);
__
jump_to_method_handle_entry
(
rcx_recv
,
rdx_temp
);
...
...
@@ -1017,63 +1691,583 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__
movl
(
rax_argslot
,
rcx_amh_vmargslot
);
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
));
__
push
(
rdi
);
// need a temp
// (must do previous push after argslot address is taken)
// 'stack_move' is number of words to drop
Register
rdi_stack_move
=
rdi
;
__
movl2ptr
(
rdi_stack_move
,
rcx_amh_conversion
);
__
sarptr
(
rdi_stack_move
,
CONV_STACK_MOVE_SHIFT
);
Register
rdi_stack_move
=
rdi_temp
;
load_stack_move
(
_masm
,
rdi_stack_move
,
rcx_recv
,
false
);
remove_arg_slots
(
_masm
,
rdi_stack_move
,
rax_argslot
,
rbx_temp
,
rdx_temp
);
__
pop
(
rdi
);
// restore temp
__
load_heap_oop
(
rcx_recv
,
rcx_mh_vmtarget
);
__
jump_to_method_handle_entry
(
rcx_recv
,
rdx_temp
);
}
break
;
case
_adapter_collect_args
:
__
unimplemented
(
entry_name
(
ek
));
// %%% FIXME: NYI
break
;
case
_adapter_fold_args
:
case
_adapter_spread_args
:
// handled completely by optimized cases
__
stop
(
"init_AdapterMethodHandle should not issue this"
);
break
;
case
_adapter_opt_collect_ref
:
case
_adapter_opt_collect_int
:
case
_adapter_opt_collect_long
:
case
_adapter_opt_collect_float
:
case
_adapter_opt_collect_double
:
case
_adapter_opt_collect_void
:
case
_adapter_opt_collect_0_ref
:
case
_adapter_opt_collect_1_ref
:
case
_adapter_opt_collect_2_ref
:
case
_adapter_opt_collect_3_ref
:
case
_adapter_opt_collect_4_ref
:
case
_adapter_opt_collect_5_ref
:
case
_adapter_opt_filter_S0_ref
:
case
_adapter_opt_filter_S1_ref
:
case
_adapter_opt_filter_S2_ref
:
case
_adapter_opt_filter_S3_ref
:
case
_adapter_opt_filter_S4_ref
:
case
_adapter_opt_filter_S5_ref
:
case
_adapter_opt_collect_2_S0_ref
:
case
_adapter_opt_collect_2_S1_ref
:
case
_adapter_opt_collect_2_S2_ref
:
case
_adapter_opt_collect_2_S3_ref
:
case
_adapter_opt_collect_2_S4_ref
:
case
_adapter_opt_collect_2_S5_ref
:
case
_adapter_opt_fold_ref
:
case
_adapter_opt_fold_int
:
case
_adapter_opt_fold_long
:
case
_adapter_opt_fold_float
:
case
_adapter_opt_fold_double
:
case
_adapter_opt_fold_void
:
case
_adapter_opt_fold_1_ref
:
case
_adapter_opt_fold_2_ref
:
case
_adapter_opt_fold_3_ref
:
case
_adapter_opt_fold_4_ref
:
case
_adapter_opt_fold_5_ref
:
{
// Given a fresh incoming stack frame, build a new ricochet frame.
// On entry, TOS points at a return PC, and RBP is the callers frame ptr.
// RSI/R13 has the caller's exact stack pointer, which we must also preserve.
// RCX contains an AdapterMethodHandle of the indicated kind.
// Relevant AMH fields:
// amh.vmargslot:
// points to the trailing edge of the arguments
// to filter, collect, or fold. For a boxing operation,
// it points just after the single primitive value.
// amh.argument:
// recursively called MH, on |collect| arguments
// amh.vmtarget:
// final destination MH, on return value, etc.
// amh.conversion.dest:
// tells what is the type of the return value
// (not needed here, since dest is also derived from ek)
// amh.conversion.vminfo:
// points to the trailing edge of the return value
// when the vmtarget is to be called; this is
// equal to vmargslot + (retained ? |collect| : 0)
// Pass 0 or more argument slots to the recursive target.
int
collect_count_constant
=
ek_adapter_opt_collect_count
(
ek
);
// The collected arguments are copied from the saved argument list:
int
collect_slot_constant
=
ek_adapter_opt_collect_slot
(
ek
);
assert
(
ek_orig
==
_adapter_collect_args
||
ek_orig
==
_adapter_fold_args
,
""
);
bool
retain_original_args
=
(
ek_orig
==
_adapter_fold_args
);
// The return value is replaced (or inserted) at the 'vminfo' argslot.
// Sometimes we can compute this statically.
int
dest_slot_constant
=
-
1
;
if
(
!
retain_original_args
)
dest_slot_constant
=
collect_slot_constant
;
else
if
(
collect_slot_constant
>=
0
&&
collect_count_constant
>=
0
)
// We are preserving all the arguments, and the return value is prepended,
// so the return slot is to the left (above) the |collect| sequence.
dest_slot_constant
=
collect_slot_constant
+
collect_count_constant
;
// Replace all those slots by the result of the recursive call.
// The result type can be one of ref, int, long, float, double, void.
// In the case of void, nothing is pushed on the stack after return.
BasicType
dest
=
ek_adapter_opt_collect_type
(
ek
);
assert
(
dest
==
type2wfield
[
dest
],
"dest is a stack slot type"
);
int
dest_count
=
type2size
[
dest
];
assert
(
dest_count
==
1
||
dest_count
==
2
||
(
dest_count
==
0
&&
dest
==
T_VOID
),
"dest has a size"
);
// Choose a return continuation.
EntryKind
ek_ret
=
_adapter_opt_return_any
;
if
(
dest
!=
T_CONFLICT
&&
OptimizeMethodHandles
)
{
switch
(
dest
)
{
case
T_INT
:
ek_ret
=
_adapter_opt_return_int
;
break
;
case
T_LONG
:
ek_ret
=
_adapter_opt_return_long
;
break
;
case
T_FLOAT
:
ek_ret
=
_adapter_opt_return_float
;
break
;
case
T_DOUBLE
:
ek_ret
=
_adapter_opt_return_double
;
break
;
case
T_OBJECT
:
ek_ret
=
_adapter_opt_return_ref
;
break
;
case
T_VOID
:
ek_ret
=
_adapter_opt_return_void
;
break
;
default
:
ShouldNotReachHere
();
}
if
(
dest
==
T_OBJECT
&&
dest_slot_constant
>=
0
)
{
EntryKind
ek_try
=
EntryKind
(
_adapter_opt_return_S0_ref
+
dest_slot_constant
);
if
(
ek_try
<=
_adapter_opt_return_LAST
&&
ek_adapter_opt_return_slot
(
ek_try
)
==
dest_slot_constant
)
{
ek_ret
=
ek_try
;
}
}
assert
(
ek_adapter_opt_return_type
(
ek_ret
)
==
dest
,
""
);
}
// Already pushed: ... keep1 | collect | keep2 | sender_pc |
// push(sender_pc);
// Compute argument base:
Register
rax_argv
=
rax_argslot
;
__
lea
(
rax_argv
,
__
argument_address
(
constant
(
0
)));
// Push a few extra argument words, if we need them to store the return value.
{
int
extra_slots
=
0
;
if
(
retain_original_args
)
{
extra_slots
=
dest_count
;
}
else
if
(
collect_count_constant
==
-
1
)
{
extra_slots
=
dest_count
;
// collect_count might be zero; be generous
}
else
if
(
dest_count
>
collect_count_constant
)
{
extra_slots
=
(
dest_count
-
collect_count_constant
);
}
else
{
// else we know we have enough dead space in |collect| to repurpose for return values
}
DEBUG_ONLY
(
extra_slots
+=
1
);
if
(
extra_slots
>
0
)
{
__
pop
(
rbx_temp
);
// return value
__
subptr
(
rsp
,
(
extra_slots
*
Interpreter
::
stackElementSize
));
// Push guard word #2 in debug mode.
DEBUG_ONLY
(
__
movptr
(
Address
(
rsp
,
0
),
(
int32_t
)
RicochetFrame
::
MAGIC_NUMBER_2
));
__
push
(
rbx_temp
);
}
}
RicochetFrame
::
enter_ricochet_frame
(
_masm
,
rcx_recv
,
rax_argv
,
entry
(
ek_ret
)
->
from_interpreted_entry
(),
rbx_temp
);
// Now pushed: ... keep1 | collect | keep2 | RF |
// some handy frame slots:
Address
exact_sender_sp_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
exact_sender_sp_offset_in_bytes
());
Address
conversion_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
conversion_offset_in_bytes
());
Address
saved_args_base_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
saved_args_base_offset_in_bytes
());
#ifdef ASSERT
if
(
VerifyMethodHandles
&&
dest
!=
T_CONFLICT
)
{
BLOCK_COMMENT
(
"verify AMH.conv.dest"
);
load_conversion_dest_type
(
_masm
,
rbx_temp
,
conversion_addr
);
Label
L_dest_ok
;
__
cmpl
(
rbx_temp
,
(
int
)
dest
);
__
jcc
(
Assembler
::
equal
,
L_dest_ok
);
if
(
dest
==
T_INT
)
{
for
(
int
bt
=
T_BOOLEAN
;
bt
<
T_INT
;
bt
++
)
{
if
(
is_subword_type
(
BasicType
(
bt
)))
{
__
cmpl
(
rbx_temp
,
(
int
)
bt
);
__
jcc
(
Assembler
::
equal
,
L_dest_ok
);
}
}
}
__
stop
(
"bad dest in AMH.conv"
);
__
BIND
(
L_dest_ok
);
}
#endif //ASSERT
// Find out where the original copy of the recursive argument sequence begins.
Register
rax_coll
=
rax_argv
;
{
RegisterOrConstant
collect_slot
=
collect_slot_constant
;
if
(
collect_slot_constant
==
-
1
)
{
__
movl
(
rdi_temp
,
rcx_amh_vmargslot
);
collect_slot
=
rdi_temp
;
}
if
(
collect_slot_constant
!=
0
)
__
lea
(
rax_coll
,
Address
(
rax_argv
,
collect_slot
,
Interpreter
::
stackElementScale
()));
// rax_coll now points at the trailing edge of |collect| and leading edge of |keep2|
}
// Replace the old AMH with the recursive MH. (No going back now.)
// In the case of a boxing call, the recursive call is to a 'boxer' method,
// such as Integer.valueOf or Long.valueOf. In the case of a filter
// or collect call, it will take one or more arguments, transform them,
// and return some result, to store back into argument_base[vminfo].
__
load_heap_oop
(
rcx_recv
,
rcx_amh_argument
);
if
(
VerifyMethodHandles
)
verify_method_handle
(
_masm
,
rcx_recv
);
// Push a space for the recursively called MH first:
__
push
((
int32_t
)
NULL_WORD
);
// Calculate |collect|, the number of arguments we are collecting.
Register
rdi_collect_count
=
rdi_temp
;
RegisterOrConstant
collect_count
;
if
(
collect_count_constant
>=
0
)
{
collect_count
=
collect_count_constant
;
}
else
{
__
load_method_handle_vmslots
(
rdi_collect_count
,
rcx_recv
,
rdx_temp
);
collect_count
=
rdi_collect_count
;
}
#ifdef ASSERT
if
(
VerifyMethodHandles
&&
collect_count_constant
>=
0
)
{
__
load_method_handle_vmslots
(
rbx_temp
,
rcx_recv
,
rdx_temp
);
Label
L_count_ok
;
__
cmpl
(
rbx_temp
,
collect_count_constant
);
__
jcc
(
Assembler
::
equal
,
L_count_ok
);
__
stop
(
"bad vminfo in AMH.conv"
);
__
BIND
(
L_count_ok
);
}
#endif //ASSERT
// copy |collect| slots directly to TOS:
push_arg_slots
(
_masm
,
rax_coll
,
collect_count
,
0
,
rbx_temp
,
rdx_temp
);
// Now pushed: ... keep1 | collect | keep2 | RF... | collect |
// rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
// If necessary, adjust the saved arguments to make room for the eventual return value.
// Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
// If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
// In the non-retaining case, this might move keep2 either up or down.
// We don't have to copy the whole | RF... collect | complex,
// but we must adjust RF.saved_args_base.
// Also, from now on, we will forget about the origial copy of |collect|.
// If we are retaining it, we will treat it as part of |keep2|.
// For clarity we will define |keep3| = |collect|keep2| or |keep2|.
BLOCK_COMMENT
(
"adjust trailing arguments {"
);
// Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
int
open_count
=
dest_count
;
RegisterOrConstant
close_count
=
collect_count_constant
;
Register
rdi_close_count
=
rdi_collect_count
;
if
(
retain_original_args
)
{
close_count
=
constant
(
0
);
}
else
if
(
collect_count_constant
==
-
1
)
{
close_count
=
rdi_collect_count
;
}
// How many slots need moving? This is simply dest_slot (0 => no |keep3|).
RegisterOrConstant
keep3_count
;
Register
rsi_keep3_count
=
rsi
;
// can repair from RF.exact_sender_sp
if
(
dest_slot_constant
>=
0
)
{
keep3_count
=
dest_slot_constant
;
}
else
{
load_conversion_vminfo
(
_masm
,
rsi_keep3_count
,
conversion_addr
);
keep3_count
=
rsi_keep3_count
;
}
#ifdef ASSERT
if
(
VerifyMethodHandles
&&
dest_slot_constant
>=
0
)
{
load_conversion_vminfo
(
_masm
,
rbx_temp
,
conversion_addr
);
Label
L_vminfo_ok
;
__
cmpl
(
rbx_temp
,
dest_slot_constant
);
__
jcc
(
Assembler
::
equal
,
L_vminfo_ok
);
__
stop
(
"bad vminfo in AMH.conv"
);
__
BIND
(
L_vminfo_ok
);
}
#endif //ASSERT
// tasks remaining:
bool
move_keep3
=
(
!
keep3_count
.
is_constant
()
||
keep3_count
.
as_constant
()
!=
0
);
bool
stomp_dest
=
(
NOT_DEBUG
(
dest
==
T_OBJECT
)
DEBUG_ONLY
(
dest_count
!=
0
));
bool
fix_arg_base
=
(
!
close_count
.
is_constant
()
||
open_count
!=
close_count
.
as_constant
());
if
(
stomp_dest
|
fix_arg_base
)
{
// we will probably need an updated rax_argv value
if
(
collect_slot_constant
>=
0
)
{
// rax_coll already holds the leading edge of |keep2|, so tweak it
assert
(
rax_coll
==
rax_argv
,
"elided a move"
);
if
(
collect_slot_constant
!=
0
)
__
subptr
(
rax_argv
,
collect_slot_constant
*
Interpreter
::
stackElementSize
);
}
else
{
// Just reload from RF.saved_args_base.
__
movptr
(
rax_argv
,
saved_args_base_addr
);
}
}
// Old and new argument locations (based at slot 0).
// Net shift (&new_argv - &old_argv) is (close_count - open_count).
bool
zero_open_count
=
(
open_count
==
0
);
// remember this bit of info
if
(
move_keep3
&&
fix_arg_base
)
{
// It will be easier t have everything in one register:
if
(
close_count
.
is_register
())
{
// Deduct open_count from close_count register to get a clean +/- value.
__
subptr
(
close_count
.
as_register
(),
open_count
);
}
else
{
close_count
=
close_count
.
as_constant
()
-
open_count
;
}
open_count
=
0
;
}
Address
old_argv
(
rax_argv
,
0
);
Address
new_argv
(
rax_argv
,
close_count
,
Interpreter
::
stackElementScale
(),
-
open_count
*
Interpreter
::
stackElementSize
);
// First decide if any actual data are to be moved.
// We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
// (As it happens, all movements involve an argument list size change.)
// If there are variable parameters, use dynamic checks to skip around the whole mess.
Label
L_done
;
if
(
!
keep3_count
.
is_constant
())
{
__
testl
(
keep3_count
.
as_register
(),
keep3_count
.
as_register
());
__
jcc
(
Assembler
::
zero
,
L_done
);
}
if
(
!
close_count
.
is_constant
())
{
__
cmpl
(
close_count
.
as_register
(),
open_count
);
__
jcc
(
Assembler
::
equal
,
L_done
);
}
if
(
move_keep3
&&
fix_arg_base
)
{
bool
emit_move_down
=
false
,
emit_move_up
=
false
,
emit_guard
=
false
;
if
(
!
close_count
.
is_constant
())
{
emit_move_down
=
emit_guard
=
!
zero_open_count
;
emit_move_up
=
true
;
}
else
if
(
open_count
!=
close_count
.
as_constant
())
{
emit_move_down
=
(
open_count
>
close_count
.
as_constant
());
emit_move_up
=
!
emit_move_down
;
}
Label
L_move_up
;
if
(
emit_guard
)
{
__
cmpl
(
close_count
.
as_register
(),
open_count
);
__
jcc
(
Assembler
::
greater
,
L_move_up
);
}
if
(
emit_move_down
)
{
// Move arguments down if |+dest+| > |-collect-|
// (This is rare, except when arguments are retained.)
// This opens space for the return value.
if
(
keep3_count
.
is_constant
())
{
for
(
int
i
=
0
;
i
<
keep3_count
.
as_constant
();
i
++
)
{
__
movptr
(
rdx_temp
,
old_argv
.
plus_disp
(
i
*
Interpreter
::
stackElementSize
));
__
movptr
(
new_argv
.
plus_disp
(
i
*
Interpreter
::
stackElementSize
),
rdx_temp
);
}
}
else
{
Register
rbx_argv_top
=
rbx_temp
;
__
lea
(
rbx_argv_top
,
old_argv
.
plus_disp
(
keep3_count
,
Interpreter
::
stackElementScale
()));
move_arg_slots_down
(
_masm
,
old_argv
,
// beginning of old argv
rbx_argv_top
,
// end of old argv
close_count
,
// distance to move down (must be negative)
rax_argv
,
rdx_temp
);
// Used argv as an iteration variable; reload from RF.saved_args_base.
__
movptr
(
rax_argv
,
saved_args_base_addr
);
}
}
if
(
emit_guard
)
{
__
jmp
(
L_done
);
// assumes emit_move_up is true also
__
BIND
(
L_move_up
);
}
if
(
emit_move_up
)
{
// Move arguments up if |+dest+| < |-collect-|
// (This is usual, except when |keep3| is empty.)
// This closes up the space occupied by the now-deleted collect values.
if
(
keep3_count
.
is_constant
())
{
for
(
int
i
=
keep3_count
.
as_constant
()
-
1
;
i
>=
0
;
i
--
)
{
__
movptr
(
rdx_temp
,
old_argv
.
plus_disp
(
i
*
Interpreter
::
stackElementSize
));
__
movptr
(
new_argv
.
plus_disp
(
i
*
Interpreter
::
stackElementSize
),
rdx_temp
);
}
}
else
{
Address
argv_top
=
old_argv
.
plus_disp
(
keep3_count
,
Interpreter
::
stackElementScale
());
move_arg_slots_up
(
_masm
,
rax_argv
,
// beginning of old argv
argv_top
,
// end of old argv
close_count
,
// distance to move up (must be positive)
rbx_temp
,
rdx_temp
);
}
}
}
__
BIND
(
L_done
);
if
(
fix_arg_base
)
{
// adjust RF.saved_args_base by adding (close_count - open_count)
if
(
!
new_argv
.
is_same_address
(
Address
(
rax_argv
,
0
)))
__
lea
(
rax_argv
,
new_argv
);
__
movptr
(
saved_args_base_addr
,
rax_argv
);
}
if
(
stomp_dest
)
{
// Stomp the return slot, so it doesn't hold garbage.
// This isn't strictly necessary, but it may help detect bugs.
int
forty_two
=
RicochetFrame
::
RETURN_VALUE_PLACEHOLDER
;
__
movptr
(
Address
(
rax_argv
,
keep3_count
,
Address
::
times_ptr
),
(
int32_t
)
forty_two
);
// uses rsi_keep3_count
}
BLOCK_COMMENT
(
"} adjust trailing arguments"
);
BLOCK_COMMENT
(
"do_recursive_call"
);
__
mov
(
saved_last_sp
,
rsp
);
// set rsi/r13 for callee
__
pushptr
(
ExternalAddress
(
SharedRuntime
::
ricochet_blob
()
->
bounce_addr
()).
addr
());
// The globally unique bounce address has two purposes:
// 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
// 2. When returned to, it cuts back the stack and redirects control flow
// to the return handler.
// The return handler will further cut back the stack when it takes
// down the RF. Perhaps there is a way to streamline this further.
// State during recursive call:
// ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
__
jump_to_method_handle_entry
(
rcx_recv
,
rdx_temp
);
break
;
}
case
_adapter_opt_return_ref
:
case
_adapter_opt_return_int
:
case
_adapter_opt_return_long
:
case
_adapter_opt_return_float
:
case
_adapter_opt_return_double
:
case
_adapter_opt_return_void
:
case
_adapter_opt_return_S0_ref
:
case
_adapter_opt_return_S1_ref
:
case
_adapter_opt_return_S2_ref
:
case
_adapter_opt_return_S3_ref
:
case
_adapter_opt_return_S4_ref
:
case
_adapter_opt_return_S5_ref
:
{
BasicType
dest_type_constant
=
ek_adapter_opt_return_type
(
ek
);
int
dest_slot_constant
=
ek_adapter_opt_return_slot
(
ek
);
if
(
VerifyMethodHandles
)
RicochetFrame
::
verify_clean
(
_masm
);
if
(
dest_slot_constant
==
-
1
)
{
// The current stub is a general handler for this dest_type.
// It can be called from _adapter_opt_return_any below.
// Stash the address in a little table.
assert
((
dest_type_constant
&
CONV_TYPE_MASK
)
==
dest_type_constant
,
"oob"
);
address
return_handler
=
__
pc
();
_adapter_return_handlers
[
dest_type_constant
]
=
return_handler
;
if
(
dest_type_constant
==
T_INT
)
{
// do the subword types too
for
(
int
bt
=
T_BOOLEAN
;
bt
<
T_INT
;
bt
++
)
{
if
(
is_subword_type
(
BasicType
(
bt
))
&&
_adapter_return_handlers
[
bt
]
==
NULL
)
{
_adapter_return_handlers
[
bt
]
=
return_handler
;
}
}
}
}
Register
rbx_arg_base
=
rbx_temp
;
assert_different_registers
(
rax
,
rdx
,
// possibly live return value registers
rdi_temp
,
rbx_arg_base
);
Address
conversion_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
conversion_offset_in_bytes
());
Address
saved_args_base_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
saved_args_base_offset_in_bytes
());
__
movptr
(
rbx_arg_base
,
saved_args_base_addr
);
RegisterOrConstant
dest_slot
=
dest_slot_constant
;
if
(
dest_slot_constant
==
-
1
)
{
load_conversion_vminfo
(
_masm
,
rdi_temp
,
conversion_addr
);
dest_slot
=
rdi_temp
;
}
// Store the result back into the argslot.
// This code uses the interpreter calling sequence, in which the return value
// is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
// There are certain irregularities with floating point values, which can be seen
// in TemplateInterpreterGenerator::generate_return_entry_for.
move_return_value
(
_masm
,
dest_type_constant
,
Address
(
rbx_arg_base
,
dest_slot
,
Interpreter
::
stackElementScale
()));
RicochetFrame
::
leave_ricochet_frame
(
_masm
,
rcx_recv
,
rbx_arg_base
,
rdx_temp
);
__
push
(
rdx_temp
);
// repush the return PC
// Load the final target and go.
if
(
VerifyMethodHandles
)
verify_method_handle
(
_masm
,
rcx_recv
);
__
jump_to_method_handle_entry
(
rcx_recv
,
rdx_temp
);
__
hlt
();
// --------------------
break
;
}
case
_adapter_opt_return_any
:
{
if
(
VerifyMethodHandles
)
RicochetFrame
::
verify_clean
(
_masm
);
Register
rdi_conv
=
rdi_temp
;
assert_different_registers
(
rax
,
rdx
,
// possibly live return value registers
rdi_conv
,
rbx_temp
);
Address
conversion_addr
=
RicochetFrame
::
frame_address
(
RicochetFrame
::
conversion_offset_in_bytes
());
load_conversion_dest_type
(
_masm
,
rdi_conv
,
conversion_addr
);
__
lea
(
rbx_temp
,
ExternalAddress
((
address
)
&
_adapter_return_handlers
[
0
]));
__
movptr
(
rbx_temp
,
Address
(
rbx_temp
,
rdi_conv
,
Address
::
times_ptr
));
#ifdef ASSERT
{
Label
L_badconv
;
__
testptr
(
rbx_temp
,
rbx_temp
);
__
jccb
(
Assembler
::
zero
,
L_badconv
);
__
jmp
(
rbx_temp
);
__
bind
(
L_badconv
);
__
stop
(
"bad method handle return"
);
}
#else //ASSERT
__
jmp
(
rbx_temp
);
#endif //ASSERT
break
;
}
case
_adapter_opt_spread_0
:
case
_adapter_opt_spread_1
:
case
_adapter_opt_spread_more
:
case
_adapter_opt_spread_1_ref
:
case
_adapter_opt_spread_2_ref
:
case
_adapter_opt_spread_3_ref
:
case
_adapter_opt_spread_4_ref
:
case
_adapter_opt_spread_5_ref
:
case
_adapter_opt_spread_ref
:
case
_adapter_opt_spread_byte
:
case
_adapter_opt_spread_char
:
case
_adapter_opt_spread_short
:
case
_adapter_opt_spread_int
:
case
_adapter_opt_spread_long
:
case
_adapter_opt_spread_float
:
case
_adapter_opt_spread_double
:
{
// spread an array out into a group of arguments
int
length_constant
=
get_ek_adapter_opt_spread_info
(
ek
);
int
length_constant
=
ek_adapter_opt_spread_count
(
ek
);
bool
length_can_be_zero
=
(
length_constant
==
0
);
if
(
length_constant
<
0
)
{
// some adapters with variable length must handle the zero case
if
(
!
OptimizeMethodHandles
||
ek_adapter_opt_spread_type
(
ek
)
!=
T_OBJECT
)
length_can_be_zero
=
true
;
}
// find the address of the array argument
__
movl
(
rax_argslot
,
rcx_amh_vmargslot
);
__
lea
(
rax_argslot
,
__
argument_address
(
rax_argslot
));
// grab some temps
{
__
push
(
rsi
);
__
push
(
rdi
);
}
// (preceding pushes must be done after argslot address is taken!)
#define UNPUSH_RSI_RDI \
{ __ pop(rdi); __ pop(rsi); }
// grab another temp
Register
rsi_temp
=
rsi
;
{
if
(
rsi_temp
==
saved_last_sp
)
__
push
(
saved_last_sp
);
}
// (preceding push must be done after argslot address is taken!)
#define UNPUSH_RSI \
{ if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
// arx_argslot points both to the array and to the first output arg
vmarg
=
Address
(
rax_argslot
,
0
);
// Get the array value.
Register
rsi_array
=
rsi
;
Register
rsi_array
=
rsi
_temp
;
Register
rdx_array_klass
=
rdx_temp
;
BasicType
elem_type
=
T_OBJECT
;
BasicType
elem_type
=
ek_adapter_opt_spread_type
(
ek
);
int
elem_slots
=
type2size
[
elem_type
];
// 1 or 2
int
array_slots
=
1
;
// array is always a T_OBJECT
int
length_offset
=
arrayOopDesc
::
length_offset_in_bytes
();
int
elem0_offset
=
arrayOopDesc
::
base_offset_in_bytes
(
elem_type
);
__
movptr
(
rsi_array
,
vmarg
);
Label
skip_array_check
;
if
(
length_constant
==
0
)
{
Label
L_array_is_empty
,
L_insert_arg_space
,
L_copy_args
,
L_args_done
;
if
(
length_can_be_zero
)
{
// handle the null pointer case, if zero is allowed
Label
L_skip
;
if
(
length_constant
<
0
)
{
load_conversion_vminfo
(
_masm
,
rbx_temp
,
rcx_amh_conversion
);
__
testl
(
rbx_temp
,
rbx_temp
);
__
jcc
(
Assembler
::
notZero
,
L_skip
);
}
__
testptr
(
rsi_array
,
rsi_array
);
__
jcc
(
Assembler
::
zero
,
skip_array_check
);
__
jcc
(
Assembler
::
zero
,
L_array_is_empty
);
__
bind
(
L_skip
);
}
__
null_check
(
rsi_array
,
oopDesc
::
klass_offset_in_bytes
());
__
load_klass
(
rdx_array_klass
,
rsi_array
);
...
...
@@ -1081,22 +2275,20 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Check the array type.
Register
rbx_klass
=
rbx_temp
;
__
load_heap_oop
(
rbx_klass
,
rcx_amh_argument
);
// this is a Class object!
__
load_heap_oop
(
rbx_klass
,
Address
(
rbx_klass
,
java_lang_Class
::
klass_offset_in_bytes
())
);
load_klass_from_Class
(
_masm
,
rbx_klass
);
Label
ok_array_klass
,
bad_array_klass
,
bad_array_length
;
__
check_klass_subtype
(
rdx_array_klass
,
rbx_klass
,
rdi
,
ok_array_klass
);
__
check_klass_subtype
(
rdx_array_klass
,
rbx_klass
,
rdi
_temp
,
ok_array_klass
);
// If we get here, the type check failed!
__
jmp
(
bad_array_klass
);
__
bind
(
ok_array_klass
);
__
BIND
(
ok_array_klass
);
// Check length.
if
(
length_constant
>=
0
)
{
__
cmpl
(
Address
(
rsi_array
,
length_offset
),
length_constant
);
}
else
{
Register
rbx_vminfo
=
rbx_temp
;
__
movl
(
rbx_vminfo
,
rcx_amh_conversion
);
assert
(
CONV_VMINFO_SHIFT
==
0
,
"preshifted"
);
__
andl
(
rbx_vminfo
,
CONV_VMINFO_MASK
);
load_conversion_vminfo
(
_masm
,
rbx_vminfo
,
rcx_amh_conversion
);
__
cmpl
(
rbx_vminfo
,
Address
(
rsi_array
,
length_offset
));
}
__
jcc
(
Assembler
::
notEqual
,
bad_array_length
);
...
...
@@ -1108,90 +2300,104 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Form a pointer to the end of the affected region.
__
lea
(
rdx_argslot_limit
,
Address
(
rax_argslot
,
Interpreter
::
stackElementSize
));
// 'stack_move' is negative number of words to insert
Register
rdi_stack_move
=
rdi
;
__
movl2ptr
(
rdi_stack_move
,
rcx_amh_conversion
);
__
sarptr
(
rdi_stack_move
,
CONV_STACK_MOVE_SHIFT
);
// This number already accounts for elem_slots.
Register
rdi_stack_move
=
rdi_temp
;
load_stack_move
(
_masm
,
rdi_stack_move
,
rcx_recv
,
true
);
__
cmpptr
(
rdi_stack_move
,
0
);
assert
(
stack_move_unit
()
<
0
,
"else change this comparison"
);
__
jcc
(
Assembler
::
less
,
L_insert_arg_space
);
__
jcc
(
Assembler
::
equal
,
L_copy_args
);
// single argument case, with no array movement
__
BIND
(
L_array_is_empty
);
remove_arg_slots
(
_masm
,
-
stack_move_unit
()
*
array_slots
,
rax_argslot
,
rbx_temp
,
rdx_temp
);
__
jmp
(
L_args_done
);
// no spreading to do
__
BIND
(
L_insert_arg_space
);
// come here in the usual case, stack_move < 0 (2 or more spread arguments)
Register
rsi_temp
=
rsi_array
;
// spill this
insert_arg_slots
(
_masm
,
rdi_stack_move
,
-
1
,
insert_arg_slots
(
_masm
,
rdi_stack_move
,
rax_argslot
,
rbx_temp
,
rsi_temp
);
// reload the array (since rsi was killed)
__
movptr
(
rsi_array
,
vmarg
);
}
else
if
(
length_constant
>
1
)
{
int
arg_mask
=
0
;
int
new_slots
=
(
length_constant
-
1
);
for
(
int
i
=
0
;
i
<
new_slots
;
i
++
)
{
arg_mask
<<=
1
;
arg_mask
|=
_INSERT_REF_MASK
;
}
insert_arg_slots
(
_masm
,
new_slots
*
stack_move_unit
(),
arg_mask
,
// reload the array since rsi was killed
// reload from rdx_argslot_limit since rax_argslot is now decremented
__
movptr
(
rsi_array
,
Address
(
rdx_argslot_limit
,
-
Interpreter
::
stackElementSize
));
}
else
if
(
length_constant
>=
1
)
{
int
new_slots
=
(
length_constant
*
elem_slots
)
-
array_slots
;
insert_arg_slots
(
_masm
,
new_slots
*
stack_move_unit
(),
rax_argslot
,
rbx_temp
,
rdx_temp
);
}
else
if
(
length_constant
==
1
)
{
// no stack resizing required
}
else
if
(
length_constant
==
0
)
{
remove_arg_slots
(
_masm
,
-
stack_move_unit
(),
__
BIND
(
L_array_is_empty
);
remove_arg_slots
(
_masm
,
-
stack_move_unit
()
*
array_slots
,
rax_argslot
,
rbx_temp
,
rdx_temp
);
}
else
{
ShouldNotReachHere
();
}
// Copy from the array to the new slots.
// Note: Stack change code preserves integrity of rax_argslot pointer.
// So even after slot insertions, rax_argslot still points to first argument.
// Beware: Arguments that are shallow on the stack are deep in the array,
// and vice versa. So a downward-growing stack (the usual) has to be copied
// elementwise in reverse order from the source array.
__
BIND
(
L_copy_args
);
if
(
length_constant
==
-
1
)
{
// [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
// Array element [0] goes at rdx_argslot_limit[-wordSize].
Register
rsi_source
=
rsi_array
;
__
lea
(
rsi_source
,
Address
(
rsi_array
,
elem0_offset
));
Register
rdx_fill_ptr
=
rdx_argslot_limit
;
Label
loop
;
__
bind
(
loop
);
__
movptr
(
rbx_temp
,
Address
(
rsi_source
,
0
));
__
movptr
(
Address
(
rax_argslot
,
0
),
rbx_temp
);
__
BIND
(
loop
);
__
addptr
(
rdx_fill_ptr
,
-
Interpreter
::
stackElementSize
*
elem_slots
);
move_typed_arg
(
_masm
,
elem_type
,
true
,
Address
(
rdx_fill_ptr
,
0
),
Address
(
rsi_source
,
0
),
rbx_temp
,
rdi_temp
);
__
addptr
(
rsi_source
,
type2aelembytes
(
elem_type
));
__
addptr
(
rax_argslot
,
Interpreter
::
stackElementSize
);
__
cmpptr
(
rax_argslot
,
rdx_argslot_limit
);
__
jccb
(
Assembler
::
less
,
loop
);
__
cmpptr
(
rdx_fill_ptr
,
rax_argslot
);
__
jcc
(
Assembler
::
greater
,
loop
);
}
else
if
(
length_constant
==
0
)
{
__
bind
(
skip_array_check
);
// nothing to copy
}
else
{
int
elem_offset
=
elem0_offset
;
int
slot_offset
=
0
;
int
slot_offset
=
length_constant
*
Interpreter
::
stackElementSize
;
for
(
int
index
=
0
;
index
<
length_constant
;
index
++
)
{
__
movptr
(
rbx_temp
,
Address
(
rsi_array
,
elem_offset
));
__
movptr
(
Address
(
rax_argslot
,
slot_offset
),
rbx_temp
);
slot_offset
-=
Interpreter
::
stackElementSize
*
elem_slots
;
// fill backward
move_typed_arg
(
_masm
,
elem_type
,
true
,
Address
(
rax_argslot
,
slot_offset
),
Address
(
rsi_array
,
elem_offset
),
rbx_temp
,
rdi_temp
);
elem_offset
+=
type2aelembytes
(
elem_type
);
slot_offset
+=
Interpreter
::
stackElementSize
;
}
}
__
BIND
(
L_args_done
);
// Arguments are spread. Move to next method handle.
UNPUSH_RSI
_RDI
;
UNPUSH_RSI
;
__
load_heap_oop
(
rcx_recv
,
rcx_mh_vmtarget
);
__
jump_to_method_handle_entry
(
rcx_recv
,
rdx_temp
);
__
bind
(
bad_array_klass
);
UNPUSH_RSI
_RDI
;
UNPUSH_RSI
;
assert
(
!
vmarg
.
uses
(
rarg2_required
),
"must be different registers"
);
__
movptr
(
rarg2_required
,
Address
(
rdx_array_klass
,
java_mirror_offset
));
// required type
__
movptr
(
rarg1_actual
,
vmarg
);
// bad array
__
movl
(
rarg0_code
,
(
int
)
Bytecodes
::
_aaload
);
// who is complaining?
__
load_heap_oop
(
rarg2_required
,
Address
(
rdx_array_klass
,
java_mirror_offset
));
// required type
__
movptr
(
rarg1_actual
,
vmarg
);
// bad array
__
movl
(
rarg0_code
,
(
int
)
Bytecodes
::
_aaload
);
// who is complaining?
__
jump
(
ExternalAddress
(
from_interpreted_entry
(
_raise_exception
)));
__
bind
(
bad_array_length
);
UNPUSH_RSI
_RDI
;
UNPUSH_RSI
;
assert
(
!
vmarg
.
uses
(
rarg2_required
),
"must be different registers"
);
__
mov
(
rarg2_required
,
rcx_recv
);
// AMH requiring a certain length
__
movptr
(
rarg1_actual
,
vmarg
);
// bad array
__
movl
(
rarg0_code
,
(
int
)
Bytecodes
::
_arraylength
);
// who is complaining?
__
mov
(
rarg2_required
,
rcx_recv
);
// AMH requiring a certain length
__
movptr
(
rarg1_actual
,
vmarg
);
// bad array
__
movl
(
rarg0_code
,
(
int
)
Bytecodes
::
_arraylength
);
// who is complaining?
__
jump
(
ExternalAddress
(
from_interpreted_entry
(
_raise_exception
)));
#undef UNPUSH_RSI
#undef UNPUSH_RSI_RDI
break
;
}
break
;
case
_adapter_flyby
:
case
_adapter_ricochet
:
__
unimplemented
(
entry_name
(
ek
));
// %%% FIXME: NYI
break
;
default:
ShouldNotReachHere
();
default:
// do not require all platforms to recognize all adapter types
__
nop
();
return
;
}
__
hlt
();
...
...
src/cpu/x86/vm/methodHandles_x86.hpp
0 → 100644
浏览文件 @
2b587c38
/*
* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.
public
:
// The stack just after the recursive call from a ricochet frame
// looks something like this. Offsets are marked in words, not bytes.
// rsi (r13 on LP64) is part of the interpreter calling sequence
// which tells the callee where is my real rsp (for frame walking).
// (...lower memory addresses)
// rsp: [ return pc ] always the global RicochetBlob::bounce_addr
// rsp+1: [ recursive arg N ]
// rsp+2: [ recursive arg N-1 ]
// ...
// rsp+N: [ recursive arg 1 ]
// rsp+N+1: [ recursive method handle ]
// ...
// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame)
// rbp-5: [ saved target MH ] the MH we will call on the saved args
// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout
// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0)
// rbp-2: [ conversion ] information about how the return value is used
// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame
// rbp+0: [ saved sender fp ] (for original sender of AMH)
// rbp+1: [ saved sender pc ] (back to original sender of AMH)
// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender)
// rbp+3: [ transformed adapter arg M-1]
// ...
// rbp+M+1: [ transformed adapter arg 1 ]
// rbp+M+2: [ padding ] <-- (rbp + saved args base offset)
// ... [ optional padding]
// (higher memory addresses...)
//
// The arguments originally passed by the original sender
// are lost, and arbitrary amounts of stack motion might have
// happened due to argument transformation.
// (This is done by C2I/I2C adapters and non-direct method handles.)
// This is why there is an unpredictable amount of memory between
// the extended and exact TOS of the sender.
// The ricochet adapter itself will also (in general) perform
// transformations before the recursive call.
//
// The transformed and saved arguments, immediately above the saved
// return PC, are a well-formed method handle invocation ready to execute.
// When the GC needs to walk the stack, these arguments are described
// via the saved arg types oop, an int[] array with a private format.
// This array is derived from the type of the transformed adapter
// method handle, which also sits at the base of the saved argument
// bundle. Since the GC may not be able to fish out the int[]
// array, so it is pushed explicitly on the stack. This may be
// an unnecessary expense.
//
// The following register conventions are significant at this point:
// rsp the thread stack, as always; preserved by caller
// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
// rcx recursive method handle (contents of [rsp+N+1])
// rbp preserved by caller (not used by caller)
// Unless otherwise specified, all registers can be blown by the call.
//
// If this frame must be walked, the transformed adapter arguments
// will be found with the help of the saved arguments descriptor.
//
// Therefore, the descriptor must match the referenced arguments.
// The arguments must be followed by at least one word of padding,
// which will be necessary to complete the final method handle call.
// That word is not treated as holding an oop. Neither is the word
//
// The word pointed to by the return argument pointer is not
// treated as an oop, even if points to a saved argument.
// This allows the saved argument list to have a "hole" in it
// to receive an oop from the recursive call.
// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
//
// When the recursive callee returns, RicochetBlob::bounce_addr will
// immediately jump to the continuation stored in the RF.
// This continuation will merge the recursive return value
// into the saved argument list. At that point, the original
// rsi, rbp, and rsp will be reloaded, the ricochet frame will
// disappear, and the final target of the adapter method handle
// will be invoked on the transformed argument list.
class
RicochetFrame
{
friend
class
MethodHandles
;
private:
intptr_t
*
_continuation
;
// what to do when control gets back here
oopDesc
*
_saved_target
;
// target method handle to invoke on saved_args
oopDesc
*
_saved_args_layout
;
// caching point for MethodTypeForm.vmlayout cookie
intptr_t
*
_saved_args_base
;
// base of pushed arguments (slot 0, arg N) (-3)
intptr_t
_conversion
;
// misc. information from original AdapterMethodHandle (-2)
intptr_t
*
_exact_sender_sp
;
// parallel to interpreter_frame_sender_sp (-1)
intptr_t
*
_sender_link
;
// *must* coincide with frame::link_offset (0)
address
_sender_pc
;
// *must* coincide with frame::return_addr_offset (1)
public:
intptr_t
*
continuation
()
const
{
return
_continuation
;
}
oop
saved_target
()
const
{
return
_saved_target
;
}
oop
saved_args_layout
()
const
{
return
_saved_args_layout
;
}
intptr_t
*
saved_args_base
()
const
{
return
_saved_args_base
;
}
intptr_t
conversion
()
const
{
return
_conversion
;
}
intptr_t
*
exact_sender_sp
()
const
{
return
_exact_sender_sp
;
}
intptr_t
*
sender_link
()
const
{
return
_sender_link
;
}
address
sender_pc
()
const
{
return
_sender_pc
;
}
intptr_t
*
extended_sender_sp
()
const
{
return
saved_args_base
();
}
intptr_t
return_value_slot_number
()
const
{
return
adapter_conversion_vminfo
(
conversion
());
}
BasicType
return_value_type
()
const
{
return
adapter_conversion_dest_type
(
conversion
());
}
bool
has_return_value_slot
()
const
{
return
return_value_type
()
!=
T_VOID
;
}
intptr_t
*
return_value_slot_addr
()
const
{
assert
(
has_return_value_slot
(),
""
);
return
saved_arg_slot_addr
(
return_value_slot_number
());
}
intptr_t
*
saved_target_slot_addr
()
const
{
return
saved_arg_slot_addr
(
saved_args_length
());
}
intptr_t
*
saved_arg_slot_addr
(
int
slot
)
const
{
assert
(
slot
>=
0
,
""
);
return
(
intptr_t
*
)(
(
address
)
saved_args_base
()
+
(
slot
*
Interpreter
::
stackElementSize
)
);
}
jint
saved_args_length
()
const
;
jint
saved_arg_offset
(
int
arg
)
const
;
// GC interface
oop
*
saved_target_addr
()
{
return
(
oop
*
)
&
_saved_target
;
}
oop
*
saved_args_layout_addr
()
{
return
(
oop
*
)
&
_saved_args_layout
;
}
oop
compute_saved_args_layout
(
bool
read_cache
,
bool
write_cache
);
// Compiler/assembler interface.
static
int
continuation_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_continuation
);
}
static
int
saved_target_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_saved_target
);
}
static
int
saved_args_layout_offset_in_bytes
(){
return
offset_of
(
RicochetFrame
,
_saved_args_layout
);
}
static
int
saved_args_base_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_saved_args_base
);
}
static
int
conversion_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_conversion
);
}
static
int
exact_sender_sp_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_exact_sender_sp
);
}
static
int
sender_link_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_sender_link
);
}
static
int
sender_pc_offset_in_bytes
()
{
return
offset_of
(
RicochetFrame
,
_sender_pc
);
}
// This value is not used for much, but it apparently must be nonzero.
static
int
frame_size_in_bytes
()
{
return
sender_link_offset_in_bytes
();
}
#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum
{
MAGIC_NUMBER_1
=
0xFEED03E
,
MAGIC_NUMBER_2
=
0xBEEF03E
};
static
int
magic_number_1_offset_in_bytes
()
{
return
-
wordSize
;
}
static
int
magic_number_2_offset_in_bytes
()
{
return
sizeof
(
RicochetFrame
);
}
intptr_t
magic_number_1
()
const
{
return
*
(
intptr_t
*
)((
address
)
this
+
magic_number_1_offset_in_bytes
());
};
intptr_t
magic_number_2
()
const
{
return
*
(
intptr_t
*
)((
address
)
this
+
magic_number_2_offset_in_bytes
());
};
#endif //ASSERT
enum
{
RETURN_VALUE_PLACEHOLDER
=
(
NOT_DEBUG
(
0
)
DEBUG_ONLY
(
42
))
};
static
void
verify_offsets
()
NOT_DEBUG_RETURN
;
void
verify
()
const
NOT_DEBUG_RETURN
;
// check for MAGIC_NUMBER, etc.
void
zap_arguments
()
NOT_DEBUG_RETURN
;
static
void
generate_ricochet_blob
(
MacroAssembler
*
_masm
,
// output params:
int
*
frame_size_in_words
,
int
*
bounce_offset
,
int
*
exception_offset
);
static
void
enter_ricochet_frame
(
MacroAssembler
*
_masm
,
Register
rcx_recv
,
Register
rax_argv
,
address
return_handler
,
Register
rbx_temp
);
static
void
leave_ricochet_frame
(
MacroAssembler
*
_masm
,
Register
rcx_recv
,
Register
new_sp_reg
,
Register
sender_pc_reg
);
static
Address
frame_address
(
int
offset
=
0
)
{
// The RicochetFrame is found by subtracting a constant offset from rbp.
return
Address
(
rbp
,
-
sender_link_offset_in_bytes
()
+
offset
);
}
static
RicochetFrame
*
from_frame
(
const
frame
&
fr
)
{
address
bp
=
(
address
)
fr
.
fp
();
RicochetFrame
*
rf
=
(
RicochetFrame
*
)(
bp
-
sender_link_offset_in_bytes
());
rf
->
verify
();
return
rf
;
}
static
void
verify_clean
(
MacroAssembler
*
_masm
)
NOT_DEBUG_RETURN
;
};
// Additional helper methods for MethodHandles code generation:
public
:
static
void
load_klass_from_Class
(
MacroAssembler
*
_masm
,
Register
klass_reg
);
static
void
load_conversion_vminfo
(
MacroAssembler
*
_masm
,
Register
reg
,
Address
conversion_field_addr
);
static
void
load_conversion_dest_type
(
MacroAssembler
*
_masm
,
Register
reg
,
Address
conversion_field_addr
);
static
void
load_stack_move
(
MacroAssembler
*
_masm
,
Register
rdi_stack_move
,
Register
rcx_amh
,
bool
might_be_negative
);
static
void
insert_arg_slots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
Register
rax_argslot
,
Register
rbx_temp
,
Register
rdx_temp
);
static
void
remove_arg_slots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
Register
rax_argslot
,
Register
rbx_temp
,
Register
rdx_temp
);
static
void
push_arg_slots
(
MacroAssembler
*
_masm
,
Register
rax_argslot
,
RegisterOrConstant
slot_count
,
int
skip_words_count
,
Register
rbx_temp
,
Register
rdx_temp
);
static
void
move_arg_slots_up
(
MacroAssembler
*
_masm
,
Register
rbx_bottom
,
// invariant
Address
top_addr
,
// can use rax_temp
RegisterOrConstant
positive_distance_in_slots
,
Register
rax_temp
,
Register
rdx_temp
);
static
void
move_arg_slots_down
(
MacroAssembler
*
_masm
,
Address
bottom_addr
,
// can use rax_temp
Register
rbx_top
,
// invariant
RegisterOrConstant
negative_distance_in_slots
,
Register
rax_temp
,
Register
rdx_temp
);
static
void
move_typed_arg
(
MacroAssembler
*
_masm
,
BasicType
type
,
bool
is_element
,
Address
slot_dest
,
Address
value_src
,
Register
rbx_temp
,
Register
rdx_temp
);
static
void
move_return_value
(
MacroAssembler
*
_masm
,
BasicType
type
,
Address
return_slot
);
static
void
verify_argslot
(
MacroAssembler
*
_masm
,
Register
argslot_reg
,
const
char
*
error_message
)
NOT_DEBUG_RETURN
;
static
void
verify_argslots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
argslot_count
,
Register
argslot_reg
,
bool
negate_argslot
,
const
char
*
error_message
)
NOT_DEBUG_RETURN
;
static
void
verify_stack_move
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
int
direction
)
NOT_DEBUG_RETURN
;
static
void
verify_klass
(
MacroAssembler
*
_masm
,
Register
obj
,
KlassHandle
klass
,
const
char
*
error_message
=
"wrong klass"
)
NOT_DEBUG_RETURN
;
static
void
verify_method_handle
(
MacroAssembler
*
_masm
,
Register
mh_reg
)
{
verify_klass
(
_masm
,
mh_reg
,
SystemDictionaryHandles
::
MethodHandle_klass
(),
"reference is a MH"
);
}
static
void
trace_method_handle
(
MacroAssembler
*
_masm
,
const
char
*
adaptername
)
PRODUCT_RETURN
;
static
Register
saved_last_sp_register
()
{
// Should be in sharedRuntime, not here.
return
LP64_ONLY
(
r13
)
NOT_LP64
(
rsi
);
}
src/cpu/x86/vm/sharedRuntime_x86_32.cpp
浏览文件 @
2b587c38
...
...
@@ -2253,6 +2253,31 @@ uint SharedRuntime::out_preserve_stack_slots() {
return
0
;
}
//----------------------------generate_ricochet_blob---------------------------
void
SharedRuntime
::
generate_ricochet_blob
()
{
if
(
!
EnableInvokeDynamic
)
return
;
// leave it as a null
// allocate space for the code
ResourceMark
rm
;
// setup code generation tools
CodeBuffer
buffer
(
"ricochet_blob"
,
256
,
256
);
MacroAssembler
*
masm
=
new
MacroAssembler
(
&
buffer
);
int
frame_size_in_words
=
-
1
,
bounce_offset
=
-
1
,
exception_offset
=
-
1
;
MethodHandles
::
RicochetFrame
::
generate_ricochet_blob
(
masm
,
&
frame_size_in_words
,
&
bounce_offset
,
&
exception_offset
);
// -------------
// make sure all code is generated
masm
->
flush
();
// failed to generate?
if
(
frame_size_in_words
<
0
||
bounce_offset
<
0
||
exception_offset
<
0
)
{
assert
(
false
,
"bad ricochet blob"
);
return
;
}
_ricochet_blob
=
RicochetBlob
::
create
(
&
buffer
,
bounce_offset
,
exception_offset
,
frame_size_in_words
);
}
//------------------------------generate_deopt_blob----------------------------
void
SharedRuntime
::
generate_deopt_blob
()
{
...
...
@@ -2996,6 +3021,8 @@ void SharedRuntime::generate_stubs() {
generate_handler_blob
(
CAST_FROM_FN_PTR
(
address
,
SafepointSynchronize
::
handle_polling_page_exception
),
true
);
generate_ricochet_blob
();
generate_deopt_blob
();
#ifdef COMPILER2
generate_uncommon_trap_blob
();
...
...
src/cpu/x86/vm/sharedRuntime_x86_64.cpp
浏览文件 @
2b587c38
...
...
@@ -2530,6 +2530,32 @@ uint SharedRuntime::out_preserve_stack_slots() {
}
//----------------------------generate_ricochet_blob---------------------------
void
SharedRuntime
::
generate_ricochet_blob
()
{
if
(
!
EnableInvokeDynamic
)
return
;
// leave it as a null
// allocate space for the code
ResourceMark
rm
;
// setup code generation tools
CodeBuffer
buffer
(
"ricochet_blob"
,
512
,
512
);
MacroAssembler
*
masm
=
new
MacroAssembler
(
&
buffer
);
int
frame_size_in_words
=
-
1
,
bounce_offset
=
-
1
,
exception_offset
=
-
1
;
MethodHandles
::
RicochetFrame
::
generate_ricochet_blob
(
masm
,
&
frame_size_in_words
,
&
bounce_offset
,
&
exception_offset
);
// -------------
// make sure all code is generated
masm
->
flush
();
// failed to generate?
if
(
frame_size_in_words
<
0
||
bounce_offset
<
0
||
exception_offset
<
0
)
{
assert
(
false
,
"bad ricochet blob"
);
return
;
}
_ricochet_blob
=
RicochetBlob
::
create
(
&
buffer
,
bounce_offset
,
exception_offset
,
frame_size_in_words
);
}
//------------------------------generate_deopt_blob----------------------------
void
SharedRuntime
::
generate_deopt_blob
()
{
// Allocate space for the code
...
...
@@ -3205,6 +3231,8 @@ void SharedRuntime::generate_stubs() {
generate_handler_blob
(
CAST_FROM_FN_PTR
(
address
,
SafepointSynchronize
::
handle_polling_page_exception
),
true
);
generate_ricochet_blob
();
generate_deopt_blob
();
#ifdef COMPILER2
...
...
src/cpu/x86/vm/stubRoutines_x86_32.hpp
浏览文件 @
2b587c38
...
...
@@ -36,7 +36,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum
method_handles_platform_dependent_constants
{
method_handles_adapters_code_size
=
10000
method_handles_adapters_code_size
=
30000
DEBUG_ONLY
(
+
10000
)
};
class
x86
{
...
...
src/cpu/x86/vm/stubRoutines_x86_64.hpp
浏览文件 @
2b587c38
...
...
@@ -38,7 +38,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum
method_handles_platform_dependent_constants
{
method_handles_adapters_code_size
=
40000
method_handles_adapters_code_size
=
80000
DEBUG_ONLY
(
+
120000
)
};
class
x86
{
...
...
src/share/vm/classfile/javaClasses.cpp
浏览文件 @
2b587c38
...
...
@@ -2602,6 +2602,7 @@ int java_lang_invoke_MethodType::ptype_count(oop mt) {
// Support for java_lang_invoke_MethodTypeForm
int
java_lang_invoke_MethodTypeForm
::
_vmslots_offset
;
int
java_lang_invoke_MethodTypeForm
::
_vmlayout_offset
;
int
java_lang_invoke_MethodTypeForm
::
_erasedType_offset
;
int
java_lang_invoke_MethodTypeForm
::
_genericInvoker_offset
;
...
...
@@ -2609,6 +2610,7 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() {
klassOop
k
=
SystemDictionary
::
MethodTypeForm_klass
();
if
(
k
!=
NULL
)
{
compute_optional_offset
(
_vmslots_offset
,
k
,
vmSymbols
::
vmslots_name
(),
vmSymbols
::
int_signature
(),
true
);
compute_optional_offset
(
_vmlayout_offset
,
k
,
vmSymbols
::
vmlayout_name
(),
vmSymbols
::
object_signature
());
compute_optional_offset
(
_erasedType_offset
,
k
,
vmSymbols
::
erasedType_name
(),
vmSymbols
::
java_lang_invoke_MethodType_signature
(),
true
);
compute_optional_offset
(
_genericInvoker_offset
,
k
,
vmSymbols
::
genericInvoker_name
(),
vmSymbols
::
java_lang_invoke_MethodHandle_signature
(),
true
);
if
(
_genericInvoker_offset
==
0
)
_genericInvoker_offset
=
-
1
;
// set to explicit "empty" value
...
...
@@ -2617,9 +2619,31 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() {
int
java_lang_invoke_MethodTypeForm
::
vmslots
(
oop
mtform
)
{
assert
(
mtform
->
klass
()
==
SystemDictionary
::
MethodTypeForm_klass
(),
"MTForm only"
);
assert
(
_vmslots_offset
>
0
,
""
);
return
mtform
->
int_field
(
_vmslots_offset
);
}
oop
java_lang_invoke_MethodTypeForm
::
vmlayout
(
oop
mtform
)
{
assert
(
mtform
->
klass
()
==
SystemDictionary
::
MethodTypeForm_klass
(),
"MTForm only"
);
assert
(
_vmlayout_offset
>
0
,
""
);
return
mtform
->
obj_field
(
_vmlayout_offset
);
}
oop
java_lang_invoke_MethodTypeForm
::
init_vmlayout
(
oop
mtform
,
oop
cookie
)
{
assert
(
mtform
->
klass
()
==
SystemDictionary
::
MethodTypeForm_klass
(),
"MTForm only"
);
oop
previous
=
vmlayout
(
mtform
);
if
(
previous
!=
NULL
)
{
return
previous
;
// someone else beat us to it
}
HeapWord
*
cookie_addr
=
(
HeapWord
*
)
mtform
->
obj_field_addr
<
oop
>
(
_vmlayout_offset
);
OrderAccess
::
storestore
();
// make sure our copy is fully committed
previous
=
oopDesc
::
atomic_compare_exchange_oop
(
cookie
,
cookie_addr
,
previous
);
if
(
previous
!=
NULL
)
{
return
previous
;
// someone else beat us to it
}
return
cookie
;
}
oop
java_lang_invoke_MethodTypeForm
::
erasedType
(
oop
mtform
)
{
assert
(
mtform
->
klass
()
==
SystemDictionary
::
MethodTypeForm_klass
(),
"MTForm only"
);
return
mtform
->
obj_field
(
_erasedType_offset
);
...
...
src/share/vm/classfile/javaClasses.hpp
浏览文件 @
2b587c38
...
...
@@ -949,18 +949,19 @@ class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodH
OP_CHECK_CAST
=
0x2
,
// ref-to-ref conversion; requires a Class argument
OP_PRIM_TO_PRIM
=
0x3
,
// converts from one primitive to another
OP_REF_TO_PRIM
=
0x4
,
// unboxes a wrapper to produce a primitive
OP_PRIM_TO_REF
=
0x5
,
// boxes a primitive into a wrapper
(NYI)
OP_PRIM_TO_REF
=
0x5
,
// boxes a primitive into a wrapper
OP_SWAP_ARGS
=
0x6
,
// swap arguments (vminfo is 2nd arg)
OP_ROT_ARGS
=
0x7
,
// rotate arguments (vminfo is displaced arg)
OP_DUP_ARGS
=
0x8
,
// duplicates one or more arguments (at TOS)
OP_DROP_ARGS
=
0x9
,
// remove one or more argument slots
OP_COLLECT_ARGS
=
0xA
,
// combine
one or more arguments into a varargs (NYI)
OP_COLLECT_ARGS
=
0xA
,
// combine
arguments using an auxiliary function
OP_SPREAD_ARGS
=
0xB
,
// expand in place a varargs array (of known size)
OP_F
LYBY
=
0xC
,
// operate first on reified argument list (NYI)
OP_RICOCHET
=
0xD
,
// run an adapter chain on the return value (NYI)
OP_F
OLD_ARGS
=
0xC
,
// combine but do not remove arguments; prepend result
//OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists
CONV_OP_LIMIT
=
0xE
,
// limit of CONV_OP enumeration
CONV_OP_MASK
=
0xF00
,
// this nybble contains the conversion op field
CONV_TYPE_MASK
=
0x0F
,
// fits T_ADDRESS and below
CONV_VMINFO_MASK
=
0x0FF
,
// LSB is reserved for JVM use
CONV_VMINFO_SHIFT
=
0
,
// position of bits in CONV_VMINFO_MASK
CONV_OP_SHIFT
=
8
,
// position of bits in CONV_OP_MASK
...
...
@@ -1089,6 +1090,7 @@ class java_lang_invoke_MethodTypeForm: AllStatic {
private:
static
int
_vmslots_offset
;
// number of argument slots needed
static
int
_vmlayout_offset
;
// object describing internal calling sequence
static
int
_erasedType_offset
;
// erasedType = canonical MethodType
static
int
_genericInvoker_offset
;
// genericInvoker = adapter for invokeGeneric
...
...
@@ -1100,8 +1102,12 @@ class java_lang_invoke_MethodTypeForm: AllStatic {
static
oop
erasedType
(
oop
mtform
);
static
oop
genericInvoker
(
oop
mtform
);
static
oop
vmlayout
(
oop
mtform
);
static
oop
init_vmlayout
(
oop
mtform
,
oop
cookie
);
// Accessors for code generation:
static
int
vmslots_offset_in_bytes
()
{
return
_vmslots_offset
;
}
static
int
vmlayout_offset_in_bytes
()
{
return
_vmlayout_offset
;
}
static
int
erasedType_offset_in_bytes
()
{
return
_erasedType_offset
;
}
static
int
genericInvoker_offset_in_bytes
()
{
return
_genericInvoker_offset
;
}
};
...
...
src/share/vm/classfile/systemDictionary.cpp
浏览文件 @
2b587c38
...
...
@@ -2362,8 +2362,15 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
spe
=
invoke_method_table
()
->
find_entry
(
index
,
hash
,
signature
,
name_id
);
if
(
spe
==
NULL
)
spe
=
invoke_method_table
()
->
add_entry
(
index
,
hash
,
signature
,
name_id
);
if
(
spe
->
property_oop
()
==
NULL
)
if
(
spe
->
property_oop
()
==
NULL
)
{
spe
->
set_property_oop
(
m
());
// Link m to his method type, if it is suitably generic.
oop
mtform
=
java_lang_invoke_MethodType
::
form
(
mt
());
if
(
mtform
!=
NULL
&&
mt
()
==
java_lang_invoke_MethodTypeForm
::
erasedType
(
mtform
)
&&
java_lang_invoke_MethodTypeForm
::
vmlayout_offset_in_bytes
()
>
0
)
{
java_lang_invoke_MethodTypeForm
::
init_vmlayout
(
mtform
,
m
());
}
}
}
else
{
non_cached_result
=
m
;
}
...
...
src/share/vm/classfile/vmSymbols.hpp
浏览文件 @
2b587c38
...
...
@@ -341,6 +341,7 @@
template(vmtarget_name, "vmtarget") \
template(vmentry_name, "vmentry") \
template(vmslots_name, "vmslots") \
template(vmlayout_name, "vmlayout") \
template(vmindex_name, "vmindex") \
template(vmargslot_name, "vmargslot") \
template(flags_name, "flags") \
...
...
@@ -393,6 +394,7 @@
template(void_signature, "V") \
template(byte_array_signature, "[B") \
template(char_array_signature, "[C") \
template(int_array_signature, "[I") \
template(object_void_signature, "(Ljava/lang/Object;)V") \
template(object_int_signature, "(Ljava/lang/Object;)I") \
template(object_boolean_signature, "(Ljava/lang/Object;)Z") \
...
...
src/share/vm/code/codeBlob.cpp
浏览文件 @
2b587c38
...
...
@@ -152,6 +152,32 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
}
void
CodeBlob
::
trace_new_stub
(
CodeBlob
*
stub
,
const
char
*
name1
,
const
char
*
name2
)
{
// Do not hold the CodeCache lock during name formatting.
assert
(
!
CodeCache_lock
->
owned_by_self
(),
"release CodeCache before registering the stub"
);
if
(
stub
!=
NULL
)
{
char
stub_id
[
256
];
assert
(
strlen
(
name1
)
+
strlen
(
name2
)
<
sizeof
(
stub_id
),
""
);
jio_snprintf
(
stub_id
,
sizeof
(
stub_id
),
"%s%s"
,
name1
,
name2
);
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
stub_id
,
(
intptr_t
)
stub
);
Disassembler
::
decode
(
stub
->
code_begin
(),
stub
->
code_end
());
}
Forte
::
register_stub
(
stub_id
,
stub
->
code_begin
(),
stub
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
const
char
*
stub_name
=
name2
;
if
(
name2
[
0
]
==
'\0'
)
stub_name
=
name1
;
JvmtiExport
::
post_dynamic_code_generated
(
stub_name
,
stub
->
code_begin
(),
stub
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
}
void
CodeBlob
::
flush
()
{
if
(
_oop_maps
)
{
FREE_C_HEAP_ARRAY
(
unsigned
char
,
_oop_maps
);
...
...
@@ -312,23 +338,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
stub
=
new
(
size
)
RuntimeStub
(
stub_name
,
cb
,
size
,
frame_complete
,
frame_size
,
oop_maps
,
caller_must_gc_arguments
);
}
// Do not hold the CodeCache lock during name formatting.
if
(
stub
!=
NULL
)
{
char
stub_id
[
256
];
jio_snprintf
(
stub_id
,
sizeof
(
stub_id
),
"RuntimeStub - %s"
,
stub_name
);
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
stub_id
,
stub
);
Disassembler
::
decode
(
stub
->
code_begin
(),
stub
->
code_end
());
}
Forte
::
register_stub
(
stub_id
,
stub
->
code_begin
(),
stub
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
JvmtiExport
::
post_dynamic_code_generated
(
stub_name
,
stub
->
code_begin
(),
stub
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
trace_new_stub
(
stub
,
"RuntimeStub - "
,
stub_name
);
return
stub
;
}
...
...
@@ -340,6 +350,50 @@ void* RuntimeStub::operator new(size_t s, unsigned size) {
return
p
;
}
// operator new shared by all singletons:
void
*
SingletonBlob
::
operator
new
(
size_t
s
,
unsigned
size
)
{
void
*
p
=
CodeCache
::
allocate
(
size
);
if
(
!
p
)
fatal
(
"Initial size of CodeCache is too small"
);
return
p
;
}
//----------------------------------------------------------------------------------------------------
// Implementation of RicochetBlob
RicochetBlob
::
RicochetBlob
(
CodeBuffer
*
cb
,
int
size
,
int
bounce_offset
,
int
exception_offset
,
int
frame_size
)
:
SingletonBlob
(
"RicochetBlob"
,
cb
,
sizeof
(
RicochetBlob
),
size
,
frame_size
,
(
OopMapSet
*
)
NULL
)
{
_bounce_offset
=
bounce_offset
;
_exception_offset
=
exception_offset
;
}
RicochetBlob
*
RicochetBlob
::
create
(
CodeBuffer
*
cb
,
int
bounce_offset
,
int
exception_offset
,
int
frame_size
)
{
RicochetBlob
*
blob
=
NULL
;
ThreadInVMfromUnknown
__tiv
;
// get to VM state in case we block on CodeCache_lock
{
MutexLockerEx
mu
(
CodeCache_lock
,
Mutex
::
_no_safepoint_check_flag
);
unsigned
int
size
=
allocation_size
(
cb
,
sizeof
(
RicochetBlob
));
blob
=
new
(
size
)
RicochetBlob
(
cb
,
size
,
bounce_offset
,
exception_offset
,
frame_size
);
}
trace_new_stub
(
blob
,
"RicochetBlob"
);
return
blob
;
}
//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob
...
...
@@ -386,34 +440,12 @@ DeoptimizationBlob* DeoptimizationBlob::create(
frame_size
);
}
// Do not hold the CodeCache lock during name formatting.
if
(
blob
!=
NULL
)
{
char
blob_id
[
256
];
jio_snprintf
(
blob_id
,
sizeof
(
blob_id
),
"DeoptimizationBlob@"
PTR_FORMAT
,
blob
->
code_begin
());
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
blob_id
,
blob
);
Disassembler
::
decode
(
blob
->
code_begin
(),
blob
->
code_end
());
}
Forte
::
register_stub
(
blob_id
,
blob
->
code_begin
(),
blob
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
JvmtiExport
::
post_dynamic_code_generated
(
"DeoptimizationBlob"
,
blob
->
code_begin
(),
blob
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
trace_new_stub
(
blob
,
"DeoptimizationBlob"
);
return
blob
;
}
// Placement allocator: the blob's storage comes straight out of the CodeCache.
// 'size' is the full allocation (header + code); the C++ object size 's' is
// unused here.  Allocation failure at this point is unrecoverable.
void* DeoptimizationBlob::operator new(size_t s, unsigned size) {
  void* result = CodeCache::allocate(size);
  if (result == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return result;
}
//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob
...
...
@@ -441,33 +473,12 @@ UncommonTrapBlob* UncommonTrapBlob::create(
blob
=
new
(
size
)
UncommonTrapBlob
(
cb
,
size
,
oop_maps
,
frame_size
);
}
// Do not hold the CodeCache lock during name formatting.
if
(
blob
!=
NULL
)
{
char
blob_id
[
256
];
jio_snprintf
(
blob_id
,
sizeof
(
blob_id
),
"UncommonTrapBlob@"
PTR_FORMAT
,
blob
->
code_begin
());
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
blob_id
,
blob
);
Disassembler
::
decode
(
blob
->
code_begin
(),
blob
->
code_end
());
}
Forte
::
register_stub
(
blob_id
,
blob
->
code_begin
(),
blob
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
JvmtiExport
::
post_dynamic_code_generated
(
"UncommonTrapBlob"
,
blob
->
code_begin
(),
blob
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
trace_new_stub
(
blob
,
"UncommonTrapBlob"
);
return
blob
;
}
// Placement allocator: the blob's storage comes straight out of the CodeCache.
// 'size' is the full allocation (header + code); the C++ object size 's' is
// unused here.  Allocation failure at this point is unrecoverable.
void* UncommonTrapBlob::operator new(size_t s, unsigned size) {
  void* result = CodeCache::allocate(size);
  if (result == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return result;
}
#endif // COMPILER2
...
...
@@ -498,33 +509,12 @@ ExceptionBlob* ExceptionBlob::create(
blob
=
new
(
size
)
ExceptionBlob
(
cb
,
size
,
oop_maps
,
frame_size
);
}
// We do not need to hold the CodeCache lock during name formatting
if
(
blob
!=
NULL
)
{
char
blob_id
[
256
];
jio_snprintf
(
blob_id
,
sizeof
(
blob_id
),
"ExceptionBlob@"
PTR_FORMAT
,
blob
->
code_begin
());
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
blob_id
,
blob
);
Disassembler
::
decode
(
blob
->
code_begin
(),
blob
->
code_end
());
}
Forte
::
register_stub
(
blob_id
,
blob
->
code_begin
(),
blob
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
JvmtiExport
::
post_dynamic_code_generated
(
"ExceptionBlob"
,
blob
->
code_begin
(),
blob
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
trace_new_stub
(
blob
,
"ExceptionBlob"
);
return
blob
;
}
// Placement allocator: the blob's storage comes straight out of the CodeCache.
// 'size' is the full allocation (header + code); the C++ object size 's' is
// unused here.  Allocation failure at this point is unrecoverable.
void* ExceptionBlob::operator new(size_t s, unsigned size) {
  void* result = CodeCache::allocate(size);
  if (result == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return result;
}
#endif // COMPILER2
...
...
@@ -554,35 +544,12 @@ SafepointBlob* SafepointBlob::create(
blob
=
new
(
size
)
SafepointBlob
(
cb
,
size
,
oop_maps
,
frame_size
);
}
// We do not need to hold the CodeCache lock during name formatting.
if
(
blob
!=
NULL
)
{
char
blob_id
[
256
];
jio_snprintf
(
blob_id
,
sizeof
(
blob_id
),
"SafepointBlob@"
PTR_FORMAT
,
blob
->
code_begin
());
if
(
PrintStubCode
)
{
tty
->
print_cr
(
"Decoding %s "
INTPTR_FORMAT
,
blob_id
,
blob
);
Disassembler
::
decode
(
blob
->
code_begin
(),
blob
->
code_end
());
}
Forte
::
register_stub
(
blob_id
,
blob
->
code_begin
(),
blob
->
code_end
());
if
(
JvmtiExport
::
should_post_dynamic_code_generated
())
{
JvmtiExport
::
post_dynamic_code_generated
(
"SafepointBlob"
,
blob
->
code_begin
(),
blob
->
code_end
());
}
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService
::
track_code_cache_memory_usage
();
trace_new_stub
(
blob
,
"SafepointBlob"
);
return
blob
;
}
// Placement allocator: the blob's storage comes straight out of the CodeCache.
// 'size' is the full allocation (header + code); the C++ object size 's' is
// unused here.  Allocation failure at this point is unrecoverable.
void* SafepointBlob::operator new(size_t s, unsigned size) {
  void* result = CodeCache::allocate(size);
  if (result == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return result;
}
//----------------------------------------------------------------------------------------------------
// Verification and printing
...
...
src/share/vm/code/codeBlob.hpp
浏览文件 @
2b587c38
...
...
@@ -35,6 +35,7 @@
// Subtypes are:
// nmethod : Compiled Java methods (include method that calls to native code)
// RuntimeStub : Call to VM runtime methods
// RicochetBlob : Used for blocking MethodHandle adapters
// DeoptimizationBlob : Used for deoptimization
// ExceptionBlob : Used for stack unrolling
// SafepointBlob : Used to handle illegal instruction exceptions
...
...
@@ -95,12 +96,13 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
void
flush
();
// Typing
virtual
bool
is_buffer_blob
()
const
{
return
false
;
}
virtual
bool
is_nmethod
()
const
{
return
false
;
}
virtual
bool
is_runtime_stub
()
const
{
return
false
;
}
virtual
bool
is_deoptimization_stub
()
const
{
return
false
;
}
virtual
bool
is_uncommon_trap_stub
()
const
{
return
false
;
}
virtual
bool
is_exception_stub
()
const
{
return
false
;
}
virtual
bool
is_buffer_blob
()
const
{
return
false
;
}
virtual
bool
is_nmethod
()
const
{
return
false
;
}
virtual
bool
is_runtime_stub
()
const
{
return
false
;
}
virtual
bool
is_ricochet_stub
()
const
{
return
false
;
}
virtual
bool
is_deoptimization_stub
()
const
{
return
false
;
}
virtual
bool
is_uncommon_trap_stub
()
const
{
return
false
;
}
virtual
bool
is_exception_stub
()
const
{
return
false
;
}
virtual
bool
is_safepoint_stub
()
const
{
return
false
;
}
virtual
bool
is_adapter_blob
()
const
{
return
false
;
}
virtual
bool
is_method_handles_adapter_blob
()
const
{
return
false
;
}
...
...
@@ -182,6 +184,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
virtual
void
print_on
(
outputStream
*
st
)
const
;
virtual
void
print_value_on
(
outputStream
*
st
)
const
;
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static
void
trace_new_stub
(
CodeBlob
*
blob
,
const
char
*
name1
,
const
char
*
name2
=
""
);
// Print the comment associated with offset on stream, if there is one
virtual
void
print_block_comment
(
outputStream
*
stream
,
address
block_begin
)
{
intptr_t
offset
=
(
intptr_t
)(
block_begin
-
code_begin
());
...
...
@@ -318,7 +323,11 @@ class RuntimeStub: public CodeBlob {
class
SingletonBlob
:
public
CodeBlob
{
friend
class
VMStructs
;
public:
protected:
void
*
operator
new
(
size_t
s
,
unsigned
size
);
public:
SingletonBlob
(
const
char
*
name
,
CodeBuffer
*
cb
,
...
...
@@ -340,6 +349,50 @@ class SingletonBlob: public CodeBlob {
};
//----------------------------------------------------------------------------------------------------
// RicochetBlob
// Holds an arbitrary argument list indefinitely while Java code executes recursively.
class RicochetBlob: public SingletonBlob {
  friend class VMStructs;
 private:
  // Code offsets (from code_begin()) of the two special entry points.
  int _bounce_offset;
  int _exception_offset;

  // Creation support
  RicochetBlob(
    CodeBuffer* cb,
    int         size,
    int         bounce_offset,
    int         exception_offset,
    int         frame_size
  );

 public:
  // Creation
  static RicochetBlob* create(
    CodeBuffer* cb,
    int         bounce_offset,
    int         exception_offset,
    int         frame_size
  );

  // Typing
  bool is_ricochet_stub() const { return true; }

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* Nothing to do */ }

  // Entry point a ricochet frame returns ("bounces") to.
  address bounce_addr() const              { return code_begin() + _bounce_offset; }
  // Entry point used when an exception unwinds through a ricochet frame.
  address exception_addr() const           { return code_begin() + _exception_offset; }
  // True if pc denotes the bounce entry point, either directly or offset by
  // frame::pc_return_offset (presumably the platform's return-address bias —
  // confirm against frame_<arch>.hpp).
  bool returns_to_bounce_addr(address pc) const {
    address bounce_pc = bounce_addr();
    return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc);
  }
};
//----------------------------------------------------------------------------------------------------
// DeoptimizationBlob
...
...
@@ -363,8 +416,6 @@ class DeoptimizationBlob: public SingletonBlob {
int
frame_size
);
void
*
operator
new
(
size_t
s
,
unsigned
size
);
public:
// Creation
static
DeoptimizationBlob
*
create
(
...
...
@@ -378,7 +429,6 @@ class DeoptimizationBlob: public SingletonBlob {
// Typing
bool
is_deoptimization_stub
()
const
{
return
true
;
}
const
DeoptimizationBlob
*
as_deoptimization_stub
()
const
{
return
this
;
}
bool
exception_address_is_unpack_entry
(
address
pc
)
const
{
address
unpack_pc
=
unpack
();
return
(
pc
==
unpack_pc
||
(
pc
+
frame
::
pc_return_offset
)
==
unpack_pc
);
...
...
@@ -426,8 +476,6 @@ class UncommonTrapBlob: public SingletonBlob {
int
frame_size
);
void
*
operator
new
(
size_t
s
,
unsigned
size
);
public:
// Creation
static
UncommonTrapBlob
*
create
(
...
...
@@ -458,8 +506,6 @@ class ExceptionBlob: public SingletonBlob {
int
frame_size
);
void
*
operator
new
(
size_t
s
,
unsigned
size
);
public:
// Creation
static
ExceptionBlob
*
create
(
...
...
@@ -491,8 +537,6 @@ class SafepointBlob: public SingletonBlob {
int
frame_size
);
void
*
operator
new
(
size_t
s
,
unsigned
size
);
public:
// Creation
static
SafepointBlob
*
create
(
...
...
src/share/vm/code/codeCache.cpp
浏览文件 @
2b587c38
...
...
@@ -796,6 +796,7 @@ void CodeCache::print_internals() {
int
nmethodCount
=
0
;
int
runtimeStubCount
=
0
;
int
adapterCount
=
0
;
int
ricochetStubCount
=
0
;
int
deoptimizationStubCount
=
0
;
int
uncommonTrapStubCount
=
0
;
int
bufferBlobCount
=
0
;
...
...
@@ -840,6 +841,8 @@ void CodeCache::print_internals() {
}
}
else
if
(
cb
->
is_runtime_stub
())
{
runtimeStubCount
++
;
}
else
if
(
cb
->
is_ricochet_stub
())
{
ricochetStubCount
++
;
}
else
if
(
cb
->
is_deoptimization_stub
())
{
deoptimizationStubCount
++
;
}
else
if
(
cb
->
is_uncommon_trap_stub
())
{
...
...
@@ -876,6 +879,7 @@ void CodeCache::print_internals() {
tty
->
print_cr
(
"runtime_stubs: %d"
,
runtimeStubCount
);
tty
->
print_cr
(
"adapters: %d"
,
adapterCount
);
tty
->
print_cr
(
"buffer blobs: %d"
,
bufferBlobCount
);
tty
->
print_cr
(
"ricochet_stubs: %d"
,
ricochetStubCount
);
tty
->
print_cr
(
"deoptimization_stubs: %d"
,
deoptimizationStubCount
);
tty
->
print_cr
(
"uncommon_traps: %d"
,
uncommonTrapStubCount
);
tty
->
print_cr
(
"
\n
nmethod size distribution (non-zombie java)"
);
...
...
src/share/vm/compiler/disassembler.cpp
浏览文件 @
2b587c38
...
...
@@ -283,10 +283,10 @@ void decode_env::print_address(address adr) {
st
->
print
(
"Stub::%s"
,
desc
->
name
());
if
(
desc
->
begin
()
!=
adr
)
st
->
print
(
"%+d 0x%p"
,
adr
-
desc
->
begin
(),
adr
);
else
if
(
WizardMode
)
st
->
print
(
" "
INT
PTR_FORMAT
,
adr
);
else
if
(
WizardMode
)
st
->
print
(
" "
PTR_FORMAT
,
adr
);
return
;
}
st
->
print
(
"Stub::<unknown> "
INT
PTR_FORMAT
,
adr
);
st
->
print
(
"Stub::<unknown> "
PTR_FORMAT
,
adr
);
return
;
}
...
...
@@ -314,8 +314,8 @@ void decode_env::print_address(address adr) {
}
}
// Fall through to a simple numeral.
st
->
print
(
INTPTR_FORMAT
,
(
intptr_t
)
adr
);
// Fall through to a simple
(hexadecimal)
numeral.
st
->
print
(
PTR_FORMAT
,
adr
);
}
void
decode_env
::
print_insn_labels
()
{
...
...
@@ -326,7 +326,7 @@ void decode_env::print_insn_labels() {
cb
->
print_block_comment
(
st
,
p
);
}
if
(
_print_pc
)
{
st
->
print
(
" "
INTPTR_FORMAT
": "
,
(
intptr_t
)
p
);
st
->
print
(
" "
PTR_FORMAT
": "
,
p
);
}
}
...
...
@@ -432,7 +432,7 @@ address decode_env::decode_instructions(address start, address end) {
void
Disassembler
::
decode
(
CodeBlob
*
cb
,
outputStream
*
st
)
{
if
(
!
load_library
())
return
;
decode_env
env
(
cb
,
st
);
env
.
output
()
->
print_cr
(
"Decoding CodeBlob "
INT
PTR_FORMAT
,
cb
);
env
.
output
()
->
print_cr
(
"Decoding CodeBlob "
PTR_FORMAT
,
cb
);
env
.
decode_instructions
(
cb
->
code_begin
(),
cb
->
code_end
());
}
...
...
@@ -446,7 +446,7 @@ void Disassembler::decode(address start, address end, outputStream* st) {
void
Disassembler
::
decode
(
nmethod
*
nm
,
outputStream
*
st
)
{
if
(
!
load_library
())
return
;
decode_env
env
(
nm
,
st
);
env
.
output
()
->
print_cr
(
"Decoding compiled method "
INT
PTR_FORMAT
":"
,
nm
);
env
.
output
()
->
print_cr
(
"Decoding compiled method "
PTR_FORMAT
":"
,
nm
);
env
.
output
()
->
print_cr
(
"Code:"
);
#ifdef SHARK
...
...
@@ -478,9 +478,9 @@ void Disassembler::decode(nmethod* nm, outputStream* st) {
int
offset
=
0
;
for
(
address
p
=
nm
->
consts_begin
();
p
<
nm
->
consts_end
();
p
+=
4
,
offset
+=
4
)
{
if
((
offset
%
8
)
==
0
)
{
env
.
output
()
->
print_cr
(
" "
INTPTR_FORMAT
" (offset: %4d): "
PTR32_FORMAT
" "
PTR64_FORMAT
,
(
intptr_t
)
p
,
offset
,
*
((
int32_t
*
)
p
),
*
((
int64_t
*
)
p
));
env
.
output
()
->
print_cr
(
" "
PTR_FORMAT
" (offset: %4d): "
PTR32_FORMAT
" "
PTR64_FORMAT
,
p
,
offset
,
*
((
int32_t
*
)
p
),
*
((
int64_t
*
)
p
));
}
else
{
env
.
output
()
->
print_cr
(
" "
INTPTR_FORMAT
" (offset: %4d): "
PTR32_FORMAT
,
(
intptr_t
)
p
,
offset
,
*
((
int32_t
*
)
p
));
env
.
output
()
->
print_cr
(
" "
PTR_FORMAT
" (offset: %4d): "
PTR32_FORMAT
,
p
,
offset
,
*
((
int32_t
*
)
p
));
}
}
}
...
...
src/share/vm/prims/jvmtiTagMap.cpp
浏览文件 @
2b587c38
...
...
@@ -3158,6 +3158,9 @@ inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread,
if
(
fr
->
is_entry_frame
())
{
last_entry_frame
=
fr
;
}
if
(
fr
->
is_ricochet_frame
())
{
fr
->
oops_ricochet_do
(
blk
,
vf
->
register_map
());
}
}
vf
=
vf
->
sender
();
...
...
src/share/vm/prims/methodHandleWalk.cpp
浏览文件 @
2b587c38
...
...
@@ -409,6 +409,11 @@ MethodHandleWalker::walk(TRAPS) {
break
;
}
case
java_lang_invoke_AdapterMethodHandle
::
OP_FOLD_ARGS
:
{
//NYI, may GC
lose
(
"unimplemented"
,
CHECK_
(
empty
));
break
;
}
case
java_lang_invoke_AdapterMethodHandle
::
OP_SPREAD_ARGS
:
{
klassOop
array_klass_oop
=
NULL
;
BasicType
array_type
=
java_lang_Class
::
as_BasicType
(
chain
().
adapter_arg_oop
(),
...
...
@@ -452,9 +457,18 @@ MethodHandleWalker::walk(TRAPS) {
Bytecodes
::
_invokestatic
,
false
,
3
,
&
arglist
[
0
],
CHECK_
(
empty
));
// Spread out the array elements.
Bytecodes
::
Code
aload_op
=
Bytecodes
::
_aaload
;
if
(
element_type
!=
T_OBJECT
)
{
lose
(
"primitive array NYI"
,
CHECK_
(
empty
));
Bytecodes
::
Code
aload_op
=
Bytecodes
::
_nop
;
switch
(
element_type
)
{
case
T_INT
:
aload_op
=
Bytecodes
::
_iaload
;
break
;
case
T_LONG
:
aload_op
=
Bytecodes
::
_laload
;
break
;
case
T_FLOAT
:
aload_op
=
Bytecodes
::
_faload
;
break
;
case
T_DOUBLE
:
aload_op
=
Bytecodes
::
_daload
;
break
;
case
T_OBJECT
:
aload_op
=
Bytecodes
::
_aaload
;
break
;
case
T_BOOLEAN
:
// fall through:
case
T_BYTE
:
aload_op
=
Bytecodes
::
_baload
;
break
;
case
T_CHAR
:
aload_op
=
Bytecodes
::
_caload
;
break
;
case
T_SHORT
:
aload_op
=
Bytecodes
::
_saload
;
break
;
default:
lose
(
"primitive array NYI"
,
CHECK_
(
empty
));
}
int
ap
=
arg_slot
;
for
(
int
i
=
0
;
i
<
spread_length
;
i
++
)
{
...
...
@@ -467,11 +481,6 @@ MethodHandleWalker::walk(TRAPS) {
break
;
}
case
java_lang_invoke_AdapterMethodHandle
::
OP_FLYBY
:
//NYI, runs Java code
case
java_lang_invoke_AdapterMethodHandle
::
OP_RICOCHET
:
//NYI, runs Java code
lose
(
"unimplemented"
,
CHECK_
(
empty
));
break
;
default:
lose
(
"bad adapter conversion"
,
CHECK_
(
empty
));
break
;
...
...
src/share/vm/prims/methodHandles.cpp
浏览文件 @
2b587c38
...
...
@@ -66,8 +66,8 @@ const char* MethodHandles::_entry_names[_EK_LIMIT+1] = {
"adapter_drop_args"
,
"adapter_collect_args"
,
"adapter_spread_args"
,
"adapter_f
lyby
"
,
"adapter_
ricochet
"
,
"adapter_f
old_args
"
,
"adapter_
unused_13
"
,
// optimized adapter types:
"adapter_swap_args/1"
,
...
...
@@ -83,9 +83,76 @@ const char* MethodHandles::_entry_names[_EK_LIMIT+1] = {
"adapter_prim_to_prim/f2d"
,
"adapter_ref_to_prim/unboxi"
,
"adapter_ref_to_prim/unboxl"
,
"adapter_spread_args/0"
,
"adapter_spread_args/1"
,
"adapter_spread_args/more"
,
// return value handlers for collect/filter/fold adapters:
"return/ref"
,
"return/int"
,
"return/long"
,
"return/float"
,
"return/double"
,
"return/void"
,
"return/S0/ref"
,
"return/S1/ref"
,
"return/S2/ref"
,
"return/S3/ref"
,
"return/S4/ref"
,
"return/S5/ref"
,
"return/any"
,
// spreading (array length cases 0, 1, ...)
"adapter_spread/0"
,
"adapter_spread/1/ref"
,
"adapter_spread/2/ref"
,
"adapter_spread/3/ref"
,
"adapter_spread/4/ref"
,
"adapter_spread/5/ref"
,
"adapter_spread/ref"
,
"adapter_spread/byte"
,
"adapter_spread/char"
,
"adapter_spread/short"
,
"adapter_spread/int"
,
"adapter_spread/long"
,
"adapter_spread/float"
,
"adapter_spread/double"
,
// blocking filter/collect conversions:
"adapter_collect/ref"
,
"adapter_collect/int"
,
"adapter_collect/long"
,
"adapter_collect/float"
,
"adapter_collect/double"
,
"adapter_collect/void"
,
"adapter_collect/0/ref"
,
"adapter_collect/1/ref"
,
"adapter_collect/2/ref"
,
"adapter_collect/3/ref"
,
"adapter_collect/4/ref"
,
"adapter_collect/5/ref"
,
"adapter_filter/S0/ref"
,
"adapter_filter/S1/ref"
,
"adapter_filter/S2/ref"
,
"adapter_filter/S3/ref"
,
"adapter_filter/S4/ref"
,
"adapter_filter/S5/ref"
,
"adapter_collect/2/S0/ref"
,
"adapter_collect/2/S1/ref"
,
"adapter_collect/2/S2/ref"
,
"adapter_collect/2/S3/ref"
,
"adapter_collect/2/S4/ref"
,
"adapter_collect/2/S5/ref"
,
// blocking fold conversions:
"adapter_fold/ref"
,
"adapter_fold/int"
,
"adapter_fold/long"
,
"adapter_fold/float"
,
"adapter_fold/double"
,
"adapter_fold/void"
,
"adapter_fold/1/ref"
,
"adapter_fold/2/ref"
,
"adapter_fold/3/ref"
,
"adapter_fold/4/ref"
,
"adapter_fold/5/ref"
,
NULL
};
...
...
@@ -96,13 +163,23 @@ int MethodHandles::_adapter_code_size = StubRoutines::meth
jobject
MethodHandles
::
_raise_exception_method
;
address
MethodHandles
::
_adapter_return_handlers
[
CONV_TYPE_MASK
+
1
];
#ifdef ASSERT
bool
MethodHandles
::
spot_check_entry_names
()
{
assert
(
!
strcmp
(
entry_name
(
_invokestatic_mh
),
"invokestatic"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_bound_ref_mh
),
"bound_ref"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_retype_only
),
"adapter_retype_only"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_
ricochet
),
"adapter_ricochet
"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_
fold_args
),
"adapter_fold_args
"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_unboxi
),
"adapter_ref_to_prim/unboxi"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_spread_char
),
"adapter_spread/char"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_spread_double
),
"adapter_spread/double"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_collect_int
),
"adapter_collect/int"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_collect_0_ref
),
"adapter_collect/0/ref"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_collect_2_S3_ref
),
"adapter_collect/2/S3/ref"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_filter_S5_ref
),
"adapter_filter/S5/ref"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_fold_3_ref
),
"adapter_fold/3/ref"
),
""
);
assert
(
!
strcmp
(
entry_name
(
_adapter_opt_fold_void
),
"adapter_fold/void"
),
""
);
return
true
;
}
#endif
...
...
@@ -112,6 +189,9 @@ bool MethodHandles::spot_check_entry_names() {
// MethodHandles::generate_adapters
//
void
MethodHandles
::
generate_adapters
()
{
#ifdef TARGET_ARCH_NYI_6939861
if
(
FLAG_IS_DEFAULT
(
UseRicochetFrames
))
UseRicochetFrames
=
false
;
#endif
if
(
!
EnableInvokeDynamic
||
SystemDictionary
::
MethodHandle_klass
()
==
NULL
)
return
;
assert
(
_adapter_code
==
NULL
,
"generate only once"
);
...
...
@@ -126,7 +206,6 @@ void MethodHandles::generate_adapters() {
g
.
generate
();
}
//------------------------------------------------------------------------------
// MethodHandlesAdapterGenerator::generate
//
...
...
@@ -135,9 +214,59 @@ void MethodHandlesAdapterGenerator::generate() {
for
(
MethodHandles
::
EntryKind
ek
=
MethodHandles
::
_EK_FIRST
;
ek
<
MethodHandles
::
_EK_LIMIT
;
ek
=
MethodHandles
::
EntryKind
(
1
+
(
int
)
ek
))
{
StubCodeMark
mark
(
this
,
"MethodHandle"
,
MethodHandles
::
entry_name
(
ek
));
MethodHandles
::
generate_method_handle_stub
(
_masm
,
ek
);
if
(
MethodHandles
::
ek_supported
(
ek
))
{
StubCodeMark
mark
(
this
,
"MethodHandle"
,
MethodHandles
::
entry_name
(
ek
));
MethodHandles
::
generate_method_handle_stub
(
_masm
,
ek
);
}
}
}
#ifdef TARGET_ARCH_NYI_6939861
// these defs belong in methodHandles_<arch>.cpp
// Placeholder for ports that have not yet implemented ricochet frames
// (bug 6939861); the real definition belongs in methodHandles_<arch>.cpp.
// Must never actually be reached on such ports.
frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap* map) {
  ShouldNotCallThis();
  return fr;  // not reached; satisfies the return-type requirement
}
// Placeholder for ports that have not yet implemented ricochet frames
// (bug 6939861); the real definition belongs in methodHandles_<arch>.cpp.
// Must never actually be reached on such ports.
void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) {
  ShouldNotCallThis();
}
#endif //TARGET_ARCH_NYI_6939861
//------------------------------------------------------------------------------
// MethodHandles::ek_supported
//
// Report whether the given adapter entry kind can be generated in this
// platform/configuration.  The "blocking" conversions (prim-to-ref boxing,
// collect, fold) require ricochet-frame support; on pre-6939861 ports the
// spread adapters are restricted to the three historical kinds.  Everything
// not listed below is unconditionally supported.
bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) {
  MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek);
  switch (ek_orig) {
  case _adapter_unused_13:
    return false;  // not defined yet
  case _adapter_prim_to_ref:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
  case _adapter_collect_args:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
  case _adapter_fold_args:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
  case _adapter_opt_return_any:
    return UseRicochetFrames;
#ifdef TARGET_ARCH_NYI_6939861
  // ports before 6939861 supported only three kinds of spread ops
  case _adapter_spread_args:
    // restrict spreads to three kinds:
    switch (ek) {
    case _adapter_opt_spread_0:
    case _adapter_opt_spread_1:
    case _adapter_opt_spread_more:
      break;
    default:
      return false;   // (was followed by an unreachable break)
    }
    break;
#endif //TARGET_ARCH_NYI_6939861
  }
  return true;
}
...
...
@@ -1564,6 +1693,8 @@ void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh,
if
(
m
->
is_abstract
())
{
THROW
(
vmSymbols
::
java_lang_AbstractMethodError
());
}
java_lang_invoke_MethodHandle
::
init_vmslots
(
mh
());
int
vmargslot
=
m
->
size_of_parameters
()
-
1
;
assert
(
java_lang_invoke_BoundMethodHandle
::
vmargslot
(
mh
())
==
vmargslot
,
""
);
if
(
VerifyMethodHandles
)
{
verify_BoundMethodHandle_with_receiver
(
mh
,
m
,
CHECK
);
...
...
@@ -1642,14 +1773,9 @@ void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnu
DEBUG_ONLY
(
int
this_pushes
=
decode_MethodHandle_stack_pushes
(
mh
()));
if
(
direct_to_method
)
{
assert
(
this_pushes
==
slots_pushed
,
"BMH pushes one or two stack slots"
);
assert
(
slots_pushed
<=
MethodHandlePushLimit
,
""
);
}
else
{
int
target_pushes
=
decode_MethodHandle_stack_pushes
(
target
());
assert
(
this_pushes
==
slots_pushed
+
target_pushes
,
"BMH stack motion must be correct"
);
// do not blow the stack; use a Java-based adapter if this limit is exceeded
// FIXME
// if (slots_pushed + target_pushes > MethodHandlePushLimit)
// err = "too many bound parameters";
}
}
...
...
@@ -1672,10 +1798,11 @@ void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum,
}
java_lang_invoke_MethodHandle
::
init_vmslots
(
mh
());
int
argslot
=
java_lang_invoke_BoundMethodHandle
::
vmargslot
(
mh
());
if
(
VerifyMethodHandles
)
{
int
insert_after
=
argnum
-
1
;
verify_vmargslot
(
mh
,
insert_after
,
java_lang_invoke_BoundMethodHandle
::
vmargslot
(
mh
())
,
CHECK
);
verify_vmargslot
(
mh
,
insert_after
,
argslot
,
CHECK
);
verify_vmslots
(
mh
,
CHECK
);
}
...
...
@@ -1769,6 +1896,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
Handle
target
(
THREAD
,
java_lang_invoke_AdapterMethodHandle
::
vmtarget
(
mh
()));
Handle
src_mtype
(
THREAD
,
java_lang_invoke_MethodHandle
::
type
(
mh
()));
Handle
dst_mtype
(
THREAD
,
java_lang_invoke_MethodHandle
::
type
(
target
()));
Handle
arg_mtype
;
const
char
*
err
=
NULL
;
...
...
@@ -1777,25 +1905,29 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
switch
(
ek
)
{
case
_adapter_check_cast
:
// target type of cast
case
_adapter_ref_to_prim
:
// wrapper type from which to unbox
case
_adapter_prim_to_ref
:
// wrapper type to box into
case
_adapter_collect_args
:
// array type to collect into
case
_adapter_spread_args
:
// array type to spread from
if
(
!
java_lang_Class
::
is_instance
(
argument
())
||
java_lang_Class
::
is_primitive
(
argument
()))
{
err
=
"adapter requires argument of type java.lang.Class"
;
break
;
}
if
(
ek
==
_adapter_collect_args
||
ek
==
_adapter_spread_args
)
{
if
(
ek
==
_adapter_spread_args
)
{
// Make sure it is a suitable collection type. (Array, for now.)
Klass
*
ak
=
Klass
::
cast
(
java_lang_Class
::
as_klassOop
(
argument
()));
if
(
!
ak
->
oop_is_objArray
())
{
{
err
=
"adapter requires argument of type java.lang.Class<Object[]>"
;
break
;
}
}
if
(
!
ak
->
oop_is_array
())
{
err
=
"spread adapter requires argument representing an array class"
;
break
;
}
BasicType
et
=
arrayKlass
::
cast
(
ak
->
as_klassOop
())
->
element_type
();
if
(
et
!=
dest
&&
stack_move
<=
0
)
{
err
=
"spread adapter requires array class argument of correct type"
;
break
;
}
}
break
;
case
_adapter_flyby
:
case
_adapter_ricochet
:
case
_adapter_prim_to_ref
:
// boxer MH to use
case
_adapter_collect_args
:
// method handle which collects the args
case
_adapter_fold_args
:
// method handle which collects the args
if
(
!
UseRicochetFrames
)
{
{
err
=
"box/collect/fold operators are not supported"
;
break
;
}
}
if
(
!
java_lang_invoke_MethodHandle
::
is_instance
(
argument
()))
{
err
=
"MethodHandle adapter argument required"
;
break
;
}
arg_mtype
=
Handle
(
THREAD
,
java_lang_invoke_MethodHandle
::
type
(
argument
()));
break
;
default:
if
(
argument
.
not_null
())
...
...
@@ -1806,6 +1938,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
if
(
err
==
NULL
)
{
// Check that the src/dest types are supplied if needed.
// Also check relevant parameter or return types.
switch
(
ek
)
{
case
_adapter_check_cast
:
if
(
src
!=
T_OBJECT
||
dest
!=
T_OBJECT
)
{
...
...
@@ -1828,8 +1961,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
}
break
;
case
_adapter_prim_to_ref
:
if
(
!
is_java_primitive
(
src
)
||
dest
!=
T_OBJECT
||
argument
()
!=
Klass
::
cast
(
SystemDictionary
::
box_klass
(
src
))
->
java_mirror
())
{
if
(
!
is_java_primitive
(
src
)
||
dest
!=
T_OBJECT
)
{
err
=
"adapter requires primitive src conversion subfield"
;
break
;
}
break
;
...
...
@@ -1840,14 +1972,12 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
err
=
"adapter requires src/dest conversion subfields for swap"
;
break
;
}
int
swap_size
=
type2size
[
src
];
oop
src_mtype
=
java_lang_invoke_AdapterMethodHandle
::
type
(
mh
());
oop
dest_mtype
=
java_lang_invoke_AdapterMethodHandle
::
type
(
target
());
int
slot_limit
=
java_lang_invoke_AdapterMethodHandle
::
vmslots
(
target
());
int
slot_limit
=
java_lang_invoke_MethodHandle
::
vmslots
(
target
());
int
src_slot
=
argslot
;
int
dest_slot
=
vminfo
;
bool
rotate_up
=
(
src_slot
>
dest_slot
);
// upward rotation
int
src_arg
=
argnum
;
int
dest_arg
=
argument_slot_to_argnum
(
d
est_mtype
,
dest_slot
);
int
dest_arg
=
argument_slot_to_argnum
(
d
st_mtype
()
,
dest_slot
);
verify_vmargslot
(
mh
,
dest_arg
,
dest_slot
,
CHECK
);
if
(
!
(
dest_slot
>=
src_slot
+
swap_size
)
&&
!
(
src_slot
>=
dest_slot
+
swap_size
))
{
...
...
@@ -1855,8 +1985,8 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
}
else
if
(
ek
==
_adapter_swap_args
&&
!
(
src_slot
>
dest_slot
))
{
err
=
"source of swap must be deeper in stack"
;
}
else
if
(
ek
==
_adapter_swap_args
)
{
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
,
dest_arg
),
java_lang_invoke_MethodType
::
ptype
(
d
est_mtype
,
src_arg
),
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
()
,
dest_arg
),
java_lang_invoke_MethodType
::
ptype
(
d
st_mtype
()
,
src_arg
),
dest_arg
);
}
else
if
(
ek
==
_adapter_rot_args
)
{
if
(
rotate_up
)
{
...
...
@@ -1864,8 +1994,8 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
// rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot]
// that is: [src_arg+1..dest_arg] --> [src_arg..dest_arg-1]
for
(
int
i
=
src_arg
+
1
;
i
<=
dest_arg
&&
err
==
NULL
;
i
++
)
{
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
,
i
),
java_lang_invoke_MethodType
::
ptype
(
d
est_mtype
,
i
-
1
),
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
()
,
i
),
java_lang_invoke_MethodType
::
ptype
(
d
st_mtype
()
,
i
-
1
),
i
);
}
}
else
{
// rotate down
...
...
@@ -1873,28 +2003,54 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
// rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss]
// that is: [dest_arg..src_arg-1] --> [dst_arg+1..src_arg]
for
(
int
i
=
dest_arg
;
i
<=
src_arg
-
1
&&
err
==
NULL
;
i
++
)
{
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
,
i
),
java_lang_invoke_MethodType
::
ptype
(
d
est_mtype
,
i
+
1
),
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
()
,
i
),
java_lang_invoke_MethodType
::
ptype
(
d
st_mtype
()
,
i
+
1
),
i
);
}
}
}
if
(
err
==
NULL
)
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
,
src_arg
),
java_lang_invoke_MethodType
::
ptype
(
d
est_mtype
,
dest_arg
),
err
=
check_argument_type_change
(
java_lang_invoke_MethodType
::
ptype
(
src_mtype
()
,
src_arg
),
java_lang_invoke_MethodType
::
ptype
(
d
st_mtype
()
,
dest_arg
),
src_arg
);
}
break
;
case
_adapter_collect_args
:
case
_adapter_spread_args
:
case
_adapter_collect_args
:
case
_adapter_fold_args
:
{
BasicType
coll_type
=
(
ek
==
_adapter_collect_args
)
?
dest
:
src
;
BasicType
elem_type
=
(
ek
==
_adapter_collect_args
)
?
src
:
dest
;
if
(
coll_type
!=
T_OBJECT
||
elem_type
!=
T_OBJECT
)
{
err
=
"adapter requires src/dest subfields"
;
break
;
// later:
// - consider making coll be a primitive array
// - consider making coll be a heterogeneous collection
bool
is_spread
=
(
ek
==
_adapter_spread_args
);
bool
is_fold
=
(
ek
==
_adapter_fold_args
);
BasicType
coll_type
=
is_spread
?
src
:
dest
;
BasicType
elem_type
=
is_spread
?
dest
:
src
;
// coll_type is type of args in collected form (or T_VOID if none)
// elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous)
if
(
coll_type
==
0
||
elem_type
==
0
)
{
err
=
"adapter requires src/dest subfields for spread or collect"
;
break
;
}
if
(
is_spread
&&
coll_type
!=
T_OBJECT
)
{
err
=
"spread adapter requires object type for argument bundle"
;
break
;
}
Handle
spread_mtype
=
(
is_spread
?
dst_mtype
:
src_mtype
);
int
spread_slot
=
argslot
;
int
spread_arg
=
argnum
;
int
slots_pushed
=
stack_move
/
stack_move_unit
();
int
coll_slot_count
=
type2size
[
coll_type
];
int
spread_slot_count
=
(
is_spread
?
slots_pushed
:
-
slots_pushed
)
+
coll_slot_count
;
if
(
is_fold
)
spread_slot_count
=
argument_slot_count
(
arg_mtype
());
if
(
!
is_spread
)
{
int
init_slots
=
argument_slot_count
(
src_mtype
());
int
coll_slots
=
argument_slot_count
(
arg_mtype
());
if
(
spread_slot_count
>
init_slots
||
spread_slot_count
!=
coll_slots
)
{
err
=
"collect adapter has inconsistent arg counts"
;
break
;
}
int
next_slots
=
argument_slot_count
(
dst_mtype
());
int
unchanged_slots_in
=
(
init_slots
-
spread_slot_count
);
int
unchanged_slots_out
=
(
next_slots
-
coll_slot_count
-
(
is_fold
?
spread_slot_count
:
0
));
if
(
unchanged_slots_in
!=
unchanged_slots_out
)
{
err
=
"collect adapter continuation has inconsistent arg counts"
;
break
;
}
}
}
break
;
...
...
@@ -1929,8 +2085,9 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
}
break
;
case
_adapter_collect_args
:
if
(
slots_pushed
>
1
)
{
err
=
"adapter requires conversion subfield slots_pushed <= 1"
;
case
_adapter_fold_args
:
if
(
slots_pushed
>
2
)
{
err
=
"adapter requires conversion subfield slots_pushed <= 2"
;
}
break
;
case
_adapter_spread_args
:
...
...
@@ -1950,32 +2107,36 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
}
if
(
err
==
NULL
)
{
// Make sure this adapter
does not push too deeply
.
// Make sure this adapter
's stack pushing is accurately recorded
.
int
slots_pushed
=
stack_move
/
stack_move_unit
();
int
this_vmslots
=
java_lang_invoke_MethodHandle
::
vmslots
(
mh
());
int
target_vmslots
=
java_lang_invoke_MethodHandle
::
vmslots
(
target
());
int
target_pushes
=
decode_MethodHandle_stack_pushes
(
target
());
if
(
slots_pushed
!=
(
target_vmslots
-
this_vmslots
))
{
err
=
"stack_move inconsistent with previous and current MethodType vmslots"
;
}
else
if
(
slots_pushed
>
0
)
{
// verify stack_move against MethodHandlePushLimit
int
target_pushes
=
decode_MethodHandle_stack_pushes
(
target
());
// do not blow the stack; use a Java-based adapter if this limit is exceeded
if
(
slots_pushed
+
target_pushes
>
MethodHandlePushLimit
)
{
err
=
"adapter pushes too many parameters"
;
}
else
{
int
this_pushes
=
decode_MethodHandle_stack_pushes
(
mh
());
if
(
slots_pushed
+
target_pushes
!=
this_pushes
)
{
if
(
this_pushes
==
0
)
err
=
"adapter push count not initialized"
;
else
err
=
"adapter push count is wrong"
;
}
}
// While we're at it, check that the stack motion decoder works:
DEBUG_ONLY
(
int
target_pushes
=
decode_MethodHandle_stack_pushes
(
target
()));
DEBUG_ONLY
(
int
this_pushes
=
decode_MethodHandle_stack_pushes
(
mh
()));
assert
(
this_pushes
==
slots_pushed
+
target_pushes
,
"AMH stack motion must be correct"
);
}
if
(
err
==
NULL
&&
vminfo
!=
0
)
{
switch
(
ek
)
{
case
_adapter_swap_args
:
case
_adapter_rot_args
:
break
;
// OK
case
_adapter_swap_args
:
case
_adapter_rot_args
:
case
_adapter_prim_to_ref
:
case
_adapter_collect_args
:
case
_adapter_fold_args
:
break
;
// OK
default:
err
=
"vminfo subfield is reserved to the JVM"
;
}
...
...
@@ -2026,6 +2187,7 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
// adjust the adapter code to the internal EntryKind enumeration:
EntryKind
ek_orig
=
adapter_entry_kind
(
conv_op
);
EntryKind
ek_opt
=
ek_orig
;
// may be optimized
EntryKind
ek_try
;
// temp
// Finalize the vmtarget field (Java initialized it to null).
if
(
!
java_lang_invoke_MethodHandle
::
is_instance
(
target
()))
{
...
...
@@ -2034,17 +2196,23 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
}
java_lang_invoke_AdapterMethodHandle
::
set_vmtarget
(
mh
(),
target
());
if
(
VerifyMethodHandles
)
{
verify_AdapterMethodHandle
(
mh
,
argnum
,
CHECK
);
}
int
stack_move
=
adapter_conversion_stack_move
(
conversion
);
BasicType
src
=
adapter_conversion_src_type
(
conversion
);
BasicType
dest
=
adapter_conversion_dest_type
(
conversion
);
int
vminfo
=
adapter_conversion_vminfo
(
conversion
);
// should be zero
int
slots_pushed
=
stack_move
/
stack_move_unit
();
if
(
VerifyMethodHandles
)
{
verify_AdapterMethodHandle
(
mh
,
argnum
,
CHECK
);
}
const
char
*
err
=
NULL
;
if
(
!
conv_op_supported
(
conv_op
))
{
err
=
"adapter not yet implemented in the JVM"
;
}
// Now it's time to finish the case analysis and pick a MethodHandleEntry.
switch
(
ek_orig
)
{
case
_adapter_retype_only
:
...
...
@@ -2077,7 +2245,7 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
}
break
;
case
1
*
4
+
2
:
if
(
src
==
T_INT
&&
dest
==
T_LONG
)
{
if
(
(
src
==
T_INT
||
is_subword_type
(
src
))
&&
dest
==
T_LONG
)
{
ek_opt
=
_adapter_opt_i2l
;
}
else
if
(
src
==
T_FLOAT
&&
dest
==
T_DOUBLE
)
{
ek_opt
=
_adapter_opt_f2d
;
...
...
@@ -2110,7 +2278,44 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
break
;
case
_adapter_prim_to_ref
:
goto
throw_not_impl
;
// allocates, hence could block
{
assert
(
UseRicochetFrames
,
"else don't come here"
);
// vminfo will be the location to insert the return value
vminfo
=
argslot
;
ek_opt
=
_adapter_opt_collect_ref
;
ensure_vmlayout_field
(
target
,
CHECK
);
if
(
!
OptimizeMethodHandles
)
break
;
switch
(
type2size
[
src
])
{
case
1
:
ek_try
=
EntryKind
(
_adapter_opt_filter_S0_ref
+
argslot
);
if
(
ek_try
<
_adapter_opt_collect_LAST
&&
ek_adapter_opt_collect_slot
(
ek_try
)
==
argslot
)
{
assert
(
ek_adapter_opt_collect_count
(
ek_try
)
==
1
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
T_OBJECT
,
""
);
ek_opt
=
ek_try
;
break
;
}
// else downgrade to variable slot:
ek_opt
=
_adapter_opt_collect_1_ref
;
break
;
case
2
:
ek_try
=
EntryKind
(
_adapter_opt_collect_2_S0_ref
+
argslot
);
if
(
ek_try
<
_adapter_opt_collect_LAST
&&
ek_adapter_opt_collect_slot
(
ek_try
)
==
argslot
)
{
assert
(
ek_adapter_opt_collect_count
(
ek_try
)
==
2
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
T_OBJECT
,
""
);
ek_opt
=
ek_try
;
break
;
}
// else downgrade to variable slot:
ek_opt
=
_adapter_opt_collect_2_ref
;
break
;
default:
assert
(
false
,
""
);
break
;
}
}
break
;
case
_adapter_swap_args
:
case
_adapter_rot_args
:
...
...
@@ -2136,29 +2341,180 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
}
break
;
case
_adapter_collect_args
:
goto
throw_not_impl
;
// allocates, hence could block
case
_adapter_spread_args
:
{
#ifdef TARGET_ARCH_NYI_6939861
// ports before 6939861 supported only three kinds of spread ops
if
(
!
UseRicochetFrames
)
{
int
array_size
=
slots_pushed
+
1
;
assert
(
array_size
>=
0
,
""
);
vminfo
=
array_size
;
switch
(
array_size
)
{
case
0
:
ek_opt
=
_adapter_opt_spread_0
;
break
;
case
1
:
ek_opt
=
_adapter_opt_spread_1
;
break
;
default:
ek_opt
=
_adapter_opt_spread_more
;
break
;
}
break
;
}
#endif //TARGET_ARCH_NYI_6939861
// vminfo will be the required length of the array
int
slots_pushed
=
stack_move
/
stack_move_unit
();
int
array_size
=
slots_pushed
+
1
;
assert
(
array_size
>=
0
,
""
);
int
array_size
=
(
slots_pushed
+
1
)
/
(
type2size
[
dest
]
==
2
?
2
:
1
);
vminfo
=
array_size
;
switch
(
array_size
)
{
case
0
:
ek_opt
=
_adapter_opt_spread_0
;
break
;
case
1
:
ek_opt
=
_adapter_opt_spread_1
;
break
;
default:
ek_opt
=
_adapter_opt_spread_more
;
break
;
// general case
switch
(
dest
)
{
case
T_BOOLEAN
:
// fall through to T_BYTE:
case
T_BYTE
:
ek_opt
=
_adapter_opt_spread_byte
;
break
;
case
T_CHAR
:
ek_opt
=
_adapter_opt_spread_char
;
break
;
case
T_SHORT
:
ek_opt
=
_adapter_opt_spread_short
;
break
;
case
T_INT
:
ek_opt
=
_adapter_opt_spread_int
;
break
;
case
T_LONG
:
ek_opt
=
_adapter_opt_spread_long
;
break
;
case
T_FLOAT
:
ek_opt
=
_adapter_opt_spread_float
;
break
;
case
T_DOUBLE
:
ek_opt
=
_adapter_opt_spread_double
;
break
;
case
T_OBJECT
:
ek_opt
=
_adapter_opt_spread_ref
;
break
;
case
T_VOID
:
if
(
array_size
!=
0
)
goto
throw_not_impl
;
ek_opt
=
_adapter_opt_spread_ref
;
break
;
default
:
goto
throw_not_impl
;
}
assert
(
array_size
==
0
||
// it doesn't matter what the spreader is
(
ek_adapter_opt_spread_count
(
ek_opt
)
==
-
1
&&
(
ek_adapter_opt_spread_type
(
ek_opt
)
==
dest
||
(
ek_adapter_opt_spread_type
(
ek_opt
)
==
T_BYTE
&&
dest
==
T_BOOLEAN
))),
err_msg
(
"dest=%d ek_opt=%d"
,
dest
,
ek_opt
));
if
(
array_size
<=
0
)
{
// since the general case does not handle length 0, this case is required:
ek_opt
=
_adapter_opt_spread_0
;
break
;
}
if
((
vminfo
&
CONV_VMINFO_MASK
)
!=
vminfo
)
goto
throw_not_impl
;
// overflow
if
(
dest
==
T_OBJECT
)
{
ek_try
=
EntryKind
(
_adapter_opt_spread_1_ref
-
1
+
array_size
);
if
(
ek_try
<
_adapter_opt_spread_LAST
&&
ek_adapter_opt_spread_count
(
ek_try
)
==
array_size
)
{
assert
(
ek_adapter_opt_spread_type
(
ek_try
)
==
dest
,
""
);
ek_opt
=
ek_try
;
break
;
}
}
break
;
}
break
;
case
_adapter_flyby
:
case
_adapter_ricochet
:
goto
throw_not_impl
;
// runs Java code, hence could block
case
_adapter_collect_args
:
{
assert
(
UseRicochetFrames
,
"else don't come here"
);
int
elem_slots
=
argument_slot_count
(
java_lang_invoke_MethodHandle
::
type
(
java_lang_invoke_AdapterMethodHandle
::
argument
(
mh
())
)
);
// vminfo will be the location to insert the return value
vminfo
=
argslot
;
ensure_vmlayout_field
(
target
,
CHECK
);
// general case:
switch
(
dest
)
{
default
:
if
(
!
is_subword_type
(
dest
))
goto
throw_not_impl
;
// else fall through:
case
T_INT
:
ek_opt
=
_adapter_opt_collect_int
;
break
;
case
T_LONG
:
ek_opt
=
_adapter_opt_collect_long
;
break
;
case
T_FLOAT
:
ek_opt
=
_adapter_opt_collect_float
;
break
;
case
T_DOUBLE
:
ek_opt
=
_adapter_opt_collect_double
;
break
;
case
T_OBJECT
:
ek_opt
=
_adapter_opt_collect_ref
;
break
;
case
T_VOID
:
ek_opt
=
_adapter_opt_collect_void
;
break
;
}
assert
(
ek_adapter_opt_collect_slot
(
ek_opt
)
==
-
1
&&
ek_adapter_opt_collect_count
(
ek_opt
)
==
-
1
&&
(
ek_adapter_opt_collect_type
(
ek_opt
)
==
dest
||
ek_adapter_opt_collect_type
(
ek_opt
)
==
T_INT
&&
is_subword_type
(
dest
)),
""
);
if
(
dest
==
T_OBJECT
&&
elem_slots
==
1
&&
OptimizeMethodHandles
)
{
// filter operation on a ref
ek_try
=
EntryKind
(
_adapter_opt_filter_S0_ref
+
argslot
);
if
(
ek_try
<
_adapter_opt_collect_LAST
&&
ek_adapter_opt_collect_slot
(
ek_try
)
==
argslot
)
{
assert
(
ek_adapter_opt_collect_count
(
ek_try
)
==
elem_slots
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
dest
,
""
);
ek_opt
=
ek_try
;
break
;
}
ek_opt
=
_adapter_opt_collect_1_ref
;
break
;
}
if
(
dest
==
T_OBJECT
&&
elem_slots
==
2
&&
OptimizeMethodHandles
)
{
// filter of two arguments
ek_try
=
EntryKind
(
_adapter_opt_collect_2_S0_ref
+
argslot
);
if
(
ek_try
<
_adapter_opt_collect_LAST
&&
ek_adapter_opt_collect_slot
(
ek_try
)
==
argslot
)
{
assert
(
ek_adapter_opt_collect_count
(
ek_try
)
==
elem_slots
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
dest
,
""
);
ek_opt
=
ek_try
;
break
;
}
ek_opt
=
_adapter_opt_collect_2_ref
;
break
;
}
if
(
dest
==
T_OBJECT
&&
OptimizeMethodHandles
)
{
// try to use a fixed length adapter
ek_try
=
EntryKind
(
_adapter_opt_collect_0_ref
+
elem_slots
);
if
(
ek_try
<
_adapter_opt_collect_LAST
&&
ek_adapter_opt_collect_count
(
ek_try
)
==
elem_slots
)
{
assert
(
ek_adapter_opt_collect_slot
(
ek_try
)
==
-
1
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
dest
,
""
);
ek_opt
=
ek_try
;
break
;
}
}
break
;
}
case
_adapter_fold_args
:
{
assert
(
UseRicochetFrames
,
"else don't come here"
);
int
elem_slots
=
argument_slot_count
(
java_lang_invoke_MethodHandle
::
type
(
java_lang_invoke_AdapterMethodHandle
::
argument
(
mh
())
)
);
// vminfo will be the location to insert the return value
vminfo
=
argslot
+
elem_slots
;
ensure_vmlayout_field
(
target
,
CHECK
);
switch
(
dest
)
{
default
:
if
(
!
is_subword_type
(
dest
))
goto
throw_not_impl
;
// else fall through:
case
T_INT
:
ek_opt
=
_adapter_opt_fold_int
;
break
;
case
T_LONG
:
ek_opt
=
_adapter_opt_fold_long
;
break
;
case
T_FLOAT
:
ek_opt
=
_adapter_opt_fold_float
;
break
;
case
T_DOUBLE
:
ek_opt
=
_adapter_opt_fold_double
;
break
;
case
T_OBJECT
:
ek_opt
=
_adapter_opt_fold_ref
;
break
;
case
T_VOID
:
ek_opt
=
_adapter_opt_fold_void
;
break
;
}
assert
(
ek_adapter_opt_collect_slot
(
ek_opt
)
==
-
1
&&
ek_adapter_opt_collect_count
(
ek_opt
)
==
-
1
&&
(
ek_adapter_opt_collect_type
(
ek_opt
)
==
dest
||
ek_adapter_opt_collect_type
(
ek_opt
)
==
T_INT
&&
is_subword_type
(
dest
)),
""
);
if
(
dest
==
T_OBJECT
&&
elem_slots
==
0
&&
OptimizeMethodHandles
)
{
// if there are no args, just pretend it's a collect
ek_opt
=
_adapter_opt_collect_0_ref
;
break
;
}
if
(
dest
==
T_OBJECT
&&
OptimizeMethodHandles
)
{
// try to use a fixed length adapter
ek_try
=
EntryKind
(
_adapter_opt_fold_1_ref
-
1
+
elem_slots
);
if
(
ek_try
<
_adapter_opt_fold_LAST
&&
ek_adapter_opt_collect_count
(
ek_try
)
==
elem_slots
)
{
assert
(
ek_adapter_opt_collect_slot
(
ek_try
)
==
-
1
&&
ek_adapter_opt_collect_type
(
ek_try
)
==
dest
,
""
);
ek_opt
=
ek_try
;
break
;
}
}
break
;
}
default:
// should have failed much earlier; must be a missing case here
...
...
@@ -2166,11 +2522,20 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
// and fall through:
throw_not_impl:
// FIXME: these adapters are NYI
err
=
"adapter not yet implemented in the JVM
"
;
if
(
err
==
NULL
)
err
=
"unknown adapter type
"
;
break
;
}
if
(
err
!=
NULL
&&
(
vminfo
&
CONV_VMINFO_MASK
)
!=
vminfo
)
{
// should not happen, since vminfo is used to encode arg/slot indexes < 255
err
=
"vminfo overflow"
;
}
if
(
err
!=
NULL
&&
!
have_entry
(
ek_opt
))
{
err
=
"adapter stub for this kind of method handle is missing"
;
}
if
(
err
!=
NULL
)
{
throw_InternalError_for_bad_conversion
(
conversion
,
err
,
THREAD
);
return
;
...
...
@@ -2190,6 +2555,26 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu
// Java code can publish it in global data structures.
}
void
MethodHandles
::
ensure_vmlayout_field
(
Handle
target
,
TRAPS
)
{
Handle
mtype
(
THREAD
,
java_lang_invoke_MethodHandle
::
type
(
target
()));
Handle
mtform
(
THREAD
,
java_lang_invoke_MethodType
::
form
(
mtype
()));
if
(
mtform
.
is_null
())
{
THROW
(
vmSymbols
::
java_lang_InternalError
());
}
if
(
java_lang_invoke_MethodTypeForm
::
vmlayout_offset_in_bytes
()
>
0
)
{
if
(
java_lang_invoke_MethodTypeForm
::
vmlayout
(
mtform
())
==
NULL
)
{
// fill it in
Handle
erased_mtype
(
THREAD
,
java_lang_invoke_MethodTypeForm
::
erasedType
(
mtform
()));
TempNewSymbol
erased_signature
=
java_lang_invoke_MethodType
::
as_signature
(
erased_mtype
(),
/*intern:*/
true
,
CHECK
);
methodOop
cookie
=
SystemDictionary
::
find_method_handle_invoke
(
vmSymbols
::
invokeExact_name
(),
erased_signature
,
SystemDictionaryHandles
::
Object_klass
(),
THREAD
);
java_lang_invoke_MethodTypeForm
::
init_vmlayout
(
mtform
(),
cookie
);
}
}
}
//
// Here are the native methods on sun.invoke.MethodHandleImpl.
// They are the private interface between this JVM and the HotSpot-specific
...
...
@@ -2360,8 +2745,10 @@ JVM_END
#ifndef PRODUCT
#define EACH_NAMED_CON(template) \
template(MethodHandles,GC_JVM_PUSH_LIMIT) \
template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) \
/* hold back this one until JDK stabilizes */
\
/* template(MethodHandles,GC_JVM_PUSH_LIMIT) */
\
/* hold back this one until JDK stabilizes */
\
/* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */
\
template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \
template(MethodHandles,ETF_DIRECT_HANDLE) \
template(MethodHandles,ETF_METHOD_NAME) \
...
...
@@ -2385,9 +2772,8 @@ JVM_END
template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \
template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \
template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \
template(java_lang_invoke_AdapterMethodHandle,OP_FLYBY) \
template(java_lang_invoke_AdapterMethodHandle,OP_RICOCHET) \
template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT) \
/* hold back this one until JDK stabilizes */
\
/*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/
\
template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \
template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \
template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \
...
...
src/share/vm/prims/methodHandles.hpp
浏览文件 @
2b587c38
...
...
@@ -66,8 +66,8 @@ class MethodHandles: AllStatic {
_adapter_drop_args
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_DROP_ARGS
,
_adapter_collect_args
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_COLLECT_ARGS
,
_adapter_spread_args
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_SPREAD_ARGS
,
_adapter_f
lyby
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_FLYBY
,
_adapter_
ricochet
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_RICOCHET
,
_adapter_f
old_args
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
OP_FOLD_ARGS
,
_adapter_
unused_13
=
_adapter_mh_first
+
13
,
//hole in the CONV_OP enumeration
_adapter_mh_last
=
_adapter_mh_first
+
java_lang_invoke_AdapterMethodHandle
::
CONV_OP_LIMIT
-
1
,
// Optimized adapter types
...
...
@@ -93,10 +93,99 @@ class MethodHandles: AllStatic {
_adapter_opt_unboxi
,
_adapter_opt_unboxl
,
// spreading (array length cases 0, 1, >=2)
_adapter_opt_spread_0
,
_adapter_opt_spread_1
,
_adapter_opt_spread_more
,
// %% Maybe tame the following with a VM_SYMBOLS_DO type macro?
// how a blocking adapter returns (platform-dependent)
_adapter_opt_return_ref
,
_adapter_opt_return_int
,
_adapter_opt_return_long
,
_adapter_opt_return_float
,
_adapter_opt_return_double
,
_adapter_opt_return_void
,
_adapter_opt_return_S0_ref
,
// return ref to S=0 (last slot)
_adapter_opt_return_S1_ref
,
// return ref to S=1 (2nd-to-last slot)
_adapter_opt_return_S2_ref
,
_adapter_opt_return_S3_ref
,
_adapter_opt_return_S4_ref
,
_adapter_opt_return_S5_ref
,
_adapter_opt_return_any
,
// dynamically select r/i/l/f/d
_adapter_opt_return_FIRST
=
_adapter_opt_return_ref
,
_adapter_opt_return_LAST
=
_adapter_opt_return_any
,
// spreading (array length cases 0, 1, ...)
_adapter_opt_spread_0
,
// spread empty array to N=0 arguments
_adapter_opt_spread_1_ref
,
// spread Object[] to N=1 argument
_adapter_opt_spread_2_ref
,
// spread Object[] to N=2 arguments
_adapter_opt_spread_3_ref
,
// spread Object[] to N=3 arguments
_adapter_opt_spread_4_ref
,
// spread Object[] to N=4 arguments
_adapter_opt_spread_5_ref
,
// spread Object[] to N=5 arguments
_adapter_opt_spread_ref
,
// spread Object[] to N arguments
_adapter_opt_spread_byte
,
// spread byte[] or boolean[] to N arguments
_adapter_opt_spread_char
,
// spread char[], etc., to N arguments
_adapter_opt_spread_short
,
// spread short[], etc., to N arguments
_adapter_opt_spread_int
,
// spread int[], short[], etc., to N arguments
_adapter_opt_spread_long
,
// spread long[] to N arguments
_adapter_opt_spread_float
,
// spread float[] to N arguments
_adapter_opt_spread_double
,
// spread double[] to N arguments
_adapter_opt_spread_FIRST
=
_adapter_opt_spread_0
,
_adapter_opt_spread_LAST
=
_adapter_opt_spread_double
,
// blocking filter/collect conversions
// These collect N arguments and replace them (at slot S) by a return value
// which is passed to the final target, along with the unaffected arguments.
// collect_{N}_{T} collects N arguments at any position into a T value
// collect_{N}_S{S}_{T} collects N arguments at slot S into a T value
// collect_{T} collects any number of arguments at any position
// filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection)
// (collect_2 is also usable as a filter, with long or double arguments)
_adapter_opt_collect_ref
,
// combine N arguments, replace with a reference
_adapter_opt_collect_int
,
// combine N arguments, replace with an int, short, etc.
_adapter_opt_collect_long
,
// combine N arguments, replace with a long
_adapter_opt_collect_float
,
// combine N arguments, replace with a float
_adapter_opt_collect_double
,
// combine N arguments, replace with a double
_adapter_opt_collect_void
,
// combine N arguments, replace with nothing
// if there is a small fixed number to push, do so without a loop:
_adapter_opt_collect_0_ref
,
// collect N=0 arguments, insert a reference
_adapter_opt_collect_1_ref
,
// collect N=1 argument, replace with a reference
_adapter_opt_collect_2_ref
,
// combine N=2 arguments, replace with a reference
_adapter_opt_collect_3_ref
,
// combine N=3 arguments, replace with a reference
_adapter_opt_collect_4_ref
,
// combine N=4 arguments, replace with a reference
_adapter_opt_collect_5_ref
,
// combine N=5 arguments, replace with a reference
// filters are an important special case because they never move arguments:
_adapter_opt_filter_S0_ref
,
// filter N=1 argument at S=0, replace with a reference
_adapter_opt_filter_S1_ref
,
// filter N=1 argument at S=1, replace with a reference
_adapter_opt_filter_S2_ref
,
// filter N=1 argument at S=2, replace with a reference
_adapter_opt_filter_S3_ref
,
// filter N=1 argument at S=3, replace with a reference
_adapter_opt_filter_S4_ref
,
// filter N=1 argument at S=4, replace with a reference
_adapter_opt_filter_S5_ref
,
// filter N=1 argument at S=5, replace with a reference
// these move arguments, but they are important for boxing
_adapter_opt_collect_2_S0_ref
,
// combine last N=2 arguments, replace with a reference
_adapter_opt_collect_2_S1_ref
,
// combine N=2 arguments at S=1, replace with a reference
_adapter_opt_collect_2_S2_ref
,
// combine N=2 arguments at S=2, replace with a reference
_adapter_opt_collect_2_S3_ref
,
// combine N=2 arguments at S=3, replace with a reference
_adapter_opt_collect_2_S4_ref
,
// combine N=2 arguments at S=4, replace with a reference
_adapter_opt_collect_2_S5_ref
,
// combine N=2 arguments at S=5, replace with a reference
_adapter_opt_collect_FIRST
=
_adapter_opt_collect_ref
,
_adapter_opt_collect_LAST
=
_adapter_opt_collect_2_S5_ref
,
// blocking folding conversions
// these are like collects, but retain all the N arguments for the final target
//_adapter_opt_fold_0_ref, // same as _adapter_opt_collect_0_ref
// fold_{N}_{T} processes N arguments at any position into a T value, which it inserts
// fold_{T} processes any number of arguments at any position
_adapter_opt_fold_ref
,
// process N arguments, prepend a reference
_adapter_opt_fold_int
,
// process N arguments, prepend an int, short, etc.
_adapter_opt_fold_long
,
// process N arguments, prepend a long
_adapter_opt_fold_float
,
// process N arguments, prepend a float
_adapter_opt_fold_double
,
// process N arguments, prepend a double
_adapter_opt_fold_void
,
// process N arguments, but leave the list unchanged
_adapter_opt_fold_1_ref
,
// process N=1 argument, prepend a reference
_adapter_opt_fold_2_ref
,
// process N=2 arguments, prepend a reference
_adapter_opt_fold_3_ref
,
// process N=3 arguments, prepend a reference
_adapter_opt_fold_4_ref
,
// process N=4 arguments, prepend a reference
_adapter_opt_fold_5_ref
,
// process N=5 arguments, prepend a reference
_adapter_opt_fold_FIRST
=
_adapter_opt_fold_ref
,
_adapter_opt_fold_LAST
=
_adapter_opt_fold_5_ref
,
_EK_LIMIT
,
_EK_FIRST
=
0
...
...
@@ -110,6 +199,7 @@ class MethodHandles: AllStatic {
enum
{
// import java_lang_invoke_AdapterMethodHandle::CONV_OP_*
CONV_OP_LIMIT
=
java_lang_invoke_AdapterMethodHandle
::
CONV_OP_LIMIT
,
CONV_OP_MASK
=
java_lang_invoke_AdapterMethodHandle
::
CONV_OP_MASK
,
CONV_TYPE_MASK
=
java_lang_invoke_AdapterMethodHandle
::
CONV_TYPE_MASK
,
CONV_VMINFO_MASK
=
java_lang_invoke_AdapterMethodHandle
::
CONV_VMINFO_MASK
,
CONV_VMINFO_SHIFT
=
java_lang_invoke_AdapterMethodHandle
::
CONV_VMINFO_SHIFT
,
CONV_OP_SHIFT
=
java_lang_invoke_AdapterMethodHandle
::
CONV_OP_SHIFT
,
...
...
@@ -123,6 +213,7 @@ class MethodHandles: AllStatic {
static
MethodHandleEntry
*
_entries
[
_EK_LIMIT
];
static
const
char
*
_entry_names
[
_EK_LIMIT
+
1
];
static
jobject
_raise_exception_method
;
static
address
_adapter_return_handlers
[
CONV_TYPE_MASK
+
1
];
// Adapters.
static
MethodHandlesAdapterBlob
*
_adapter_code
;
...
...
@@ -147,39 +238,195 @@ class MethodHandles: AllStatic {
}
// Some adapter helper functions.
static
void
get_ek_bound_mh_info
(
EntryKind
ek
,
BasicType
&
arg_type
,
int
&
arg_mask
,
int
&
arg_slots
)
{
static
EntryKind
ek_original_kind
(
EntryKind
ek
)
{
if
(
ek
<=
_adapter_mh_last
)
return
ek
;
switch
(
ek
)
{
case
_bound_int_mh
:
// fall-thru
case
_bound_int_direct_mh
:
arg_type
=
T_INT
;
arg_mask
=
_INSERT_INT_MASK
;
break
;
case
_bound_long_mh
:
// fall-thru
case
_bound_long_direct_mh
:
arg_type
=
T_LONG
;
arg_mask
=
_INSERT_LONG_MASK
;
break
;
case
_bound_ref_mh
:
// fall-thru
case
_bound_ref_direct_mh
:
arg_type
=
T_OBJECT
;
arg_mask
=
_INSERT_REF_MASK
;
break
;
default:
ShouldNotReachHere
();
case
_adapter_opt_swap_1
:
case
_adapter_opt_swap_2
:
return
_adapter_swap_args
;
case
_adapter_opt_rot_1_up
:
case
_adapter_opt_rot_1_down
:
case
_adapter_opt_rot_2_up
:
case
_adapter_opt_rot_2_down
:
return
_adapter_rot_args
;
case
_adapter_opt_i2i
:
case
_adapter_opt_l2i
:
case
_adapter_opt_d2f
:
case
_adapter_opt_i2l
:
case
_adapter_opt_f2d
:
return
_adapter_prim_to_prim
;
case
_adapter_opt_unboxi
:
case
_adapter_opt_unboxl
:
return
_adapter_ref_to_prim
;
}
arg_slots
=
type2size
[
arg_type
];
if
(
ek
>=
_adapter_opt_spread_FIRST
&&
ek
<=
_adapter_opt_spread_LAST
)
return
_adapter_spread_args
;
if
(
ek
>=
_adapter_opt_collect_FIRST
&&
ek
<=
_adapter_opt_collect_LAST
)
return
_adapter_collect_args
;
if
(
ek
>=
_adapter_opt_fold_FIRST
&&
ek
<=
_adapter_opt_fold_LAST
)
return
_adapter_fold_args
;
if
(
ek
>=
_adapter_opt_return_FIRST
&&
ek
<=
_adapter_opt_return_LAST
)
return
_adapter_opt_return_any
;
assert
(
false
,
"oob"
);
return
_EK_LIMIT
;
}
static
void
get_ek_adapter_opt_swap_rot_info
(
EntryKind
ek
,
int
&
swap_bytes
,
int
&
rotate
)
{
int
swap_slots
=
0
;
static
bool
ek_supported
(
MethodHandles
::
EntryKind
ek
);
static
BasicType
ek_bound_mh_arg_type
(
EntryKind
ek
)
{
switch
(
ek
)
{
case
_adapter_opt_swap_1
:
swap_slots
=
1
;
rotate
=
0
;
break
;
case
_adapter_opt_swap_2
:
swap_slots
=
2
;
rotate
=
0
;
break
;
case
_adapter_opt_rot_1_up
:
swap_slots
=
1
;
rotate
=
1
;
break
;
case
_adapter_opt_rot_1_down
:
swap_slots
=
1
;
rotate
=
-
1
;
break
;
case
_adapter_opt_rot_2_up
:
swap_slots
=
2
;
rotate
=
1
;
break
;
case
_adapter_opt_rot_2_down
:
swap_slots
=
2
;
rotate
=
-
1
;
break
;
default:
ShouldNotReachHere
();
case
_bound_int_mh
:
// fall-thru
case
_bound_int_direct_mh
:
return
T_INT
;
case
_bound_long_mh
:
// fall-thru
case
_bound_long_direct_mh
:
return
T_LONG
;
default
:
return
T_OBJECT
;
}
// Return the size of the stack slots to move in bytes.
swap_bytes
=
swap_slots
*
Interpreter
::
stackElementSize
;
}
static
int
get_ek_adapter_opt_spread_info
(
EntryKind
ek
)
{
static
int
ek_adapter_opt_swap_slots
(
EntryKind
ek
)
{
switch
(
ek
)
{
case
_adapter_opt_swap_1
:
return
1
;
case
_adapter_opt_swap_2
:
return
2
;
case
_adapter_opt_rot_1_up
:
return
1
;
case
_adapter_opt_rot_1_down
:
return
1
;
case
_adapter_opt_rot_2_up
:
return
2
;
case
_adapter_opt_rot_2_down
:
return
2
;
default
:
ShouldNotReachHere
();
return
-
1
;
}
}
static
int
ek_adapter_opt_swap_mode
(
EntryKind
ek
)
{
switch
(
ek
)
{
case
_adapter_opt_swap_1
:
return
0
;
case
_adapter_opt_swap_2
:
return
0
;
case
_adapter_opt_rot_1_up
:
return
1
;
case
_adapter_opt_rot_1_down
:
return
-
1
;
case
_adapter_opt_rot_2_up
:
return
1
;
case
_adapter_opt_rot_2_down
:
return
-
1
;
default
:
ShouldNotReachHere
();
return
0
;
}
}
static
int
ek_adapter_opt_collect_count
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_collect_FIRST
&&
ek
<=
_adapter_opt_collect_LAST
||
ek
>=
_adapter_opt_fold_FIRST
&&
ek
<=
_adapter_opt_fold_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_collect_0_ref
:
return
0
;
case
_adapter_opt_filter_S0_ref
:
case
_adapter_opt_filter_S1_ref
:
case
_adapter_opt_filter_S2_ref
:
case
_adapter_opt_filter_S3_ref
:
case
_adapter_opt_filter_S4_ref
:
case
_adapter_opt_filter_S5_ref
:
case
_adapter_opt_fold_1_ref
:
case
_adapter_opt_collect_1_ref
:
return
1
;
case
_adapter_opt_collect_2_S0_ref
:
case
_adapter_opt_collect_2_S1_ref
:
case
_adapter_opt_collect_2_S2_ref
:
case
_adapter_opt_collect_2_S3_ref
:
case
_adapter_opt_collect_2_S4_ref
:
case
_adapter_opt_collect_2_S5_ref
:
case
_adapter_opt_fold_2_ref
:
case
_adapter_opt_collect_2_ref
:
return
2
;
case
_adapter_opt_fold_3_ref
:
case
_adapter_opt_collect_3_ref
:
return
3
;
case
_adapter_opt_fold_4_ref
:
case
_adapter_opt_collect_4_ref
:
return
4
;
case
_adapter_opt_fold_5_ref
:
case
_adapter_opt_collect_5_ref
:
return
5
;
default
:
return
-
1
;
// sentinel value for "variable"
}
}
static
int
ek_adapter_opt_collect_slot
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_collect_FIRST
&&
ek
<=
_adapter_opt_collect_LAST
||
ek
>=
_adapter_opt_fold_FIRST
&&
ek
<=
_adapter_opt_fold_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_collect_2_S0_ref
:
case
_adapter_opt_filter_S0_ref
:
return
0
;
case
_adapter_opt_collect_2_S1_ref
:
case
_adapter_opt_filter_S1_ref
:
return
1
;
case
_adapter_opt_collect_2_S2_ref
:
case
_adapter_opt_filter_S2_ref
:
return
2
;
case
_adapter_opt_collect_2_S3_ref
:
case
_adapter_opt_filter_S3_ref
:
return
3
;
case
_adapter_opt_collect_2_S4_ref
:
case
_adapter_opt_filter_S4_ref
:
return
4
;
case
_adapter_opt_collect_2_S5_ref
:
case
_adapter_opt_filter_S5_ref
:
return
5
;
default
:
return
-
1
;
// sentinel value for "variable"
}
}
static
BasicType
ek_adapter_opt_collect_type
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_collect_FIRST
&&
ek
<=
_adapter_opt_collect_LAST
||
ek
>=
_adapter_opt_fold_FIRST
&&
ek
<=
_adapter_opt_fold_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_spread_0
:
return
0
;
case
_adapter_opt_spread_1
:
return
1
;
default
:
return
-
1
;
case
_adapter_opt_fold_int
:
case
_adapter_opt_collect_int
:
return
T_INT
;
case
_adapter_opt_fold_long
:
case
_adapter_opt_collect_long
:
return
T_LONG
;
case
_adapter_opt_fold_float
:
case
_adapter_opt_collect_float
:
return
T_FLOAT
;
case
_adapter_opt_fold_double
:
case
_adapter_opt_collect_double
:
return
T_DOUBLE
;
case
_adapter_opt_fold_void
:
case
_adapter_opt_collect_void
:
return
T_VOID
;
default
:
return
T_OBJECT
;
}
}
static
int
ek_adapter_opt_return_slot
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_return_FIRST
&&
ek
<=
_adapter_opt_return_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_return_S0_ref
:
return
0
;
case
_adapter_opt_return_S1_ref
:
return
1
;
case
_adapter_opt_return_S2_ref
:
return
2
;
case
_adapter_opt_return_S3_ref
:
return
3
;
case
_adapter_opt_return_S4_ref
:
return
4
;
case
_adapter_opt_return_S5_ref
:
return
5
;
default
:
return
-
1
;
// sentinel value for "variable"
}
}
static
BasicType
ek_adapter_opt_return_type
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_return_FIRST
&&
ek
<=
_adapter_opt_return_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_return_int
:
return
T_INT
;
case
_adapter_opt_return_long
:
return
T_LONG
;
case
_adapter_opt_return_float
:
return
T_FLOAT
;
case
_adapter_opt_return_double
:
return
T_DOUBLE
;
case
_adapter_opt_return_void
:
return
T_VOID
;
case
_adapter_opt_return_any
:
return
T_CONFLICT
;
// sentinel value for "variable"
default
:
return
T_OBJECT
;
}
}
static
int
ek_adapter_opt_spread_count
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_spread_FIRST
&&
ek
<=
_adapter_opt_spread_LAST
,
""
);
switch
(
ek
)
{
case
_adapter_opt_spread_0
:
return
0
;
case
_adapter_opt_spread_1_ref
:
return
1
;
case
_adapter_opt_spread_2_ref
:
return
2
;
case
_adapter_opt_spread_3_ref
:
return
3
;
case
_adapter_opt_spread_4_ref
:
return
4
;
case
_adapter_opt_spread_5_ref
:
return
5
;
default
:
return
-
1
;
// sentinel value for "variable"
}
}
static
BasicType
ek_adapter_opt_spread_type
(
EntryKind
ek
)
{
assert
(
ek
>=
_adapter_opt_spread_FIRST
&&
ek
<=
_adapter_opt_spread_LAST
,
""
);
switch
(
ek
)
{
// (there is no _adapter_opt_spread_boolean; we use byte)
case
_adapter_opt_spread_byte
:
return
T_BYTE
;
case
_adapter_opt_spread_char
:
return
T_CHAR
;
case
_adapter_opt_spread_short
:
return
T_SHORT
;
case
_adapter_opt_spread_int
:
return
T_INT
;
case
_adapter_opt_spread_long
:
return
T_LONG
;
case
_adapter_opt_spread_float
:
return
T_FLOAT
;
case
_adapter_opt_spread_double
:
return
T_DOUBLE
;
default
:
return
T_OBJECT
;
}
}
...
...
@@ -228,12 +475,21 @@ class MethodHandles: AllStatic {
// Bit mask of conversion_op values. May vary by platform.
static
int
adapter_conversion_ops_supported_mask
();
static
bool
conv_op_supported
(
int
conv_op
)
{
assert
(
conv_op_valid
(
conv_op
),
""
);
return
((
adapter_conversion_ops_supported_mask
()
&
nth_bit
(
conv_op
))
!=
0
);
}
// Offset in words that the interpreter stack pointer moves when an argument is pushed.
// The stack_move value must always be a multiple of this.
static
int
stack_move_unit
()
{
return
frame
::
interpreter_frame_expression_stack_direction
()
*
Interpreter
::
stackElementWords
;
}
// Adapter frame traversal. (Implementation-specific.)
static
frame
ricochet_frame_sender
(
const
frame
&
fr
,
RegisterMap
*
reg_map
);
static
void
ricochet_frame_oops_do
(
const
frame
&
fr
,
OopClosure
*
blk
,
const
RegisterMap
*
reg_map
);
enum
{
CONV_VMINFO_SIGN_FLAG
=
0x80
};
// Shift values for prim-to-prim conversions.
static
int
adapter_prim_to_prim_subword_vminfo
(
BasicType
dest
)
{
...
...
@@ -429,6 +685,7 @@ class MethodHandles: AllStatic {
// Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.)
static
void
init_AdapterMethodHandle
(
Handle
mh
,
Handle
target
,
int
argnum
,
TRAPS
);
static
void
ensure_vmlayout_field
(
Handle
target
,
TRAPS
);
#ifdef ASSERT
static
bool
spot_check_entry_names
();
...
...
@@ -448,12 +705,54 @@ class MethodHandles: AllStatic {
return
same_basic_type_for_arguments
(
src
,
dst
,
raw
,
true
);
}
enum
{
// arg_mask values
static
Symbol
*
convert_to_signature
(
oop
type_str
,
bool
polymorphic
,
TRAPS
);
#ifdef TARGET_ARCH_x86
# include "methodHandles_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
#define TARGET_ARCH_NYI_6939861 1 //FIXME
//# include "methodHandles_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
#define TARGET_ARCH_NYI_6939861 1 //FIXME
//# include "methodHandles_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
#define TARGET_ARCH_NYI_6939861 1 //FIXME
//# include "methodHandles_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
#define TARGET_ARCH_NYI_6939861 1 //FIXME
//# include "methodHandles_ppc.hpp"
#endif
#ifdef TARGET_ARCH_NYI_6939861
// Here are some backward compatible declarations until the 6939861 ports are updated.
#define _adapter_flyby (_EK_LIMIT + 10)
#define _adapter_ricochet (_EK_LIMIT + 11)
#define _adapter_opt_spread_1 _adapter_opt_spread_1_ref
#define _adapter_opt_spread_more _adapter_opt_spread_ref
enum
{
_INSERT_NO_MASK
=
-
1
,
_INSERT_REF_MASK
=
0
,
_INSERT_INT_MASK
=
1
,
_INSERT_LONG_MASK
=
3
};
// Backward-compatible shim (pre-6939861 ports): derives the bound-argument
// type, insertion mask, and slot count for a bound-MH entry kind.
//   arg_type  - basic type of the bound argument, from ek_bound_mh_arg_type
//   arg_mask  - always 0 here (no insertion mask in the new scheme)
//   arg_slots - stack slots occupied by arg_type (1 or 2, via type2size)
static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
  arg_type  = ek_bound_mh_arg_type(ek);
  arg_mask  = 0;
  arg_slots = type2size[arg_type];  // fixed stray ';;' (empty statement)
}
// Backward-compatible shim: translates a swap/rotate adapter entry kind
// into a byte distance (swap_bytes) and a rotate mode (rotate).
static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
  const int slots = ek_adapter_opt_swap_slots(ek);
  swap_bytes = slots * Interpreter::stackElementSize;
  rotate     = ek_adapter_opt_swap_mode(ek);
}
// Backward-compatible shim: number of arguments spread by this entry kind
// (-1 is the sentinel for "variable count" — see ek_adapter_opt_spread_count).
static int get_ek_adapter_opt_spread_info(EntryKind ek) {
  return ek_adapter_opt_spread_count(ek);
}
static
void
insert_arg_slots
(
MacroAssembler
*
_masm
,
RegisterOrConstant
arg_slots
,
int
arg_mask
,
...
...
@@ -466,8 +765,7 @@ class MethodHandles: AllStatic {
Register
temp_reg
,
Register
temp2_reg
,
Register
temp3_reg
=
noreg
);
static
void
trace_method_handle
(
MacroAssembler
*
_masm
,
const
char
*
adaptername
)
PRODUCT_RETURN
;
static
Symbol
*
convert_to_signature
(
oop
type_str
,
bool
polymorphic
,
TRAPS
);
#endif //TARGET_ARCH_NYI_6939861
};
...
...
src/share/vm/runtime/frame.cpp
浏览文件 @
2b587c38
...
...
@@ -33,6 +33,7 @@
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
...
...
@@ -169,6 +170,11 @@ void frame::set_pc(address newpc ) {
}
// type testers
bool
frame
::
is_ricochet_frame
()
const
{
RicochetBlob
*
rcb
=
SharedRuntime
::
ricochet_blob
();
return
(
_cb
==
rcb
&&
rcb
!=
NULL
&&
rcb
->
returns_to_bounce_addr
(
_pc
));
}
bool
frame
::
is_deoptimized_frame
()
const
{
assert
(
_deopt_state
!=
unknown
,
"not answerable"
);
return
_deopt_state
==
is_deoptimized
;
...
...
@@ -341,12 +347,18 @@ frame frame::java_sender() const {
frame
frame
::
real_sender
(
RegisterMap
*
map
)
const
{
frame
result
=
sender
(
map
);
while
(
result
.
is_runtime_frame
())
{
while
(
result
.
is_runtime_frame
()
||
result
.
is_ricochet_frame
())
{
result
=
result
.
sender
(
map
);
}
return
result
;
}
// Computes the sender of a ricochet frame; the layout is implementation-
// specific, so the walk is delegated to MethodHandles::ricochet_frame_sender.
frame frame::sender_for_ricochet_frame(RegisterMap* map) const {
  assert(is_ricochet_frame(), "");
  return MethodHandles::ricochet_frame_sender(*this, map);
}
// Note: called by profiler - NOT for current thread
frame
frame
::
profile_find_Java_sender_frame
(
JavaThread
*
thread
)
{
// If we don't recognize this frame, walk back up the stack until we do
...
...
@@ -529,6 +541,7 @@ jint frame::interpreter_frame_expression_stack_size() const {
const
char
*
frame
::
print_name
()
const
{
if
(
is_native_frame
())
return
"Native"
;
if
(
is_interpreted_frame
())
return
"Interpreted"
;
if
(
is_ricochet_frame
())
return
"Ricochet"
;
if
(
is_compiled_frame
())
{
if
(
is_deoptimized_frame
())
return
"Deoptimized"
;
return
"Compiled"
;
...
...
@@ -715,6 +728,8 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
st
->
print
(
"v ~RuntimeStub::%s"
,
((
RuntimeStub
*
)
_cb
)
->
name
());
}
else
if
(
_cb
->
is_deoptimization_stub
())
{
st
->
print
(
"v ~DeoptimizationBlob"
);
}
else
if
(
_cb
->
is_ricochet_stub
())
{
st
->
print
(
"v ~RichochetBlob"
);
}
else
if
(
_cb
->
is_exception_stub
())
{
st
->
print
(
"v ~ExceptionBlob"
);
}
else
if
(
_cb
->
is_safepoint_stub
())
{
...
...
@@ -978,6 +993,9 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver,
void
frame
::
oops_code_blob_do
(
OopClosure
*
f
,
CodeBlobClosure
*
cf
,
const
RegisterMap
*
reg_map
)
{
assert
(
_cb
!=
NULL
,
"sanity check"
);
if
(
_cb
==
SharedRuntime
::
ricochet_blob
())
{
oops_ricochet_do
(
f
,
reg_map
);
}
if
(
_cb
->
oop_maps
()
!=
NULL
)
{
OopMapSet
::
oops_do
(
this
,
reg_map
,
f
);
...
...
@@ -996,6 +1014,11 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const Register
cf
->
do_code_blob
(
_cb
);
}
// Applies the closure f to the oops held by this ricochet frame; the
// frame layout is platform-specific, so MethodHandles does the walking.
void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) {
  assert(is_ricochet_frame(), "");
  MethodHandles::ricochet_frame_oops_do(*this, f, map);
}
class
CompiledArgumentOopFinder
:
public
SignatureInfo
{
protected:
OopClosure
*
_f
;
...
...
src/share/vm/runtime/frame.hpp
浏览文件 @
2b587c38
...
...
@@ -135,6 +135,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
bool
is_interpreted_frame
()
const
;
bool
is_java_frame
()
const
;
bool
is_entry_frame
()
const
;
// Java frame called from C?
bool
is_ricochet_frame
()
const
;
bool
is_native_frame
()
const
;
bool
is_runtime_frame
()
const
;
bool
is_compiled_frame
()
const
;
...
...
@@ -175,6 +176,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
// Helper methods for better factored code in frame::sender
frame
sender_for_compiled_frame
(
RegisterMap
*
map
)
const
;
frame
sender_for_entry_frame
(
RegisterMap
*
map
)
const
;
frame
sender_for_ricochet_frame
(
RegisterMap
*
map
)
const
;
frame
sender_for_interpreter_frame
(
RegisterMap
*
map
)
const
;
frame
sender_for_native_frame
(
RegisterMap
*
map
)
const
;
...
...
@@ -400,6 +402,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
// Oops-do's
void
oops_compiled_arguments_do
(
Symbol
*
signature
,
bool
has_receiver
,
const
RegisterMap
*
reg_map
,
OopClosure
*
f
);
void
oops_interpreted_do
(
OopClosure
*
f
,
const
RegisterMap
*
map
,
bool
query_oop_map_cache
=
true
);
void
oops_ricochet_do
(
OopClosure
*
f
,
const
RegisterMap
*
map
);
private:
void
oops_interpreted_arguments_do
(
Symbol
*
signature
,
bool
has_receiver
,
OopClosure
*
f
);
...
...
src/share/vm/runtime/globals.hpp
浏览文件 @
2b587c38
...
...
@@ -3708,6 +3708,10 @@ class CommandLineFlags {
diagnostic(bool, OptimizeMethodHandles, true, \
"when constructing method handles, try to improve them") \
\
diagnostic(bool, UseRicochetFrames, true, \
"use ricochet stack frames for method handle combination, " \
"if the platform supports them") \
\
experimental(bool, TrustFinalNonStaticFields, false, \
"trust final non-static declarations for constant folding") \
\
...
...
src/share/vm/runtime/sharedRuntime.cpp
浏览文件 @
2b587c38
...
...
@@ -88,6 +88,8 @@ HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
HS_DTRACE_PROBE_DECL7
(
hotspot
,
method__return
,
int
,
char
*
,
int
,
char
*
,
int
,
char
*
,
int
);
RicochetBlob
*
SharedRuntime
::
_ricochet_blob
=
NULL
;
// Implementation of SharedRuntime
#ifndef PRODUCT
...
...
@@ -460,6 +462,10 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre
if
(
Interpreter
::
contains
(
return_address
))
{
return
Interpreter
::
rethrow_exception_entry
();
}
// Ricochet frame unwind code
if
(
SharedRuntime
::
ricochet_blob
()
!=
NULL
&&
SharedRuntime
::
ricochet_blob
()
->
returns_to_bounce_addr
(
return_address
))
{
return
SharedRuntime
::
ricochet_blob
()
->
exception_addr
();
}
guarantee
(
blob
==
NULL
||
!
blob
->
is_runtime_stub
(),
"caller should have skipped stub"
);
guarantee
(
!
VtableStubs
::
contains
(
return_address
),
"NULL exceptions in vtables should have been handled already!"
);
...
...
@@ -1174,6 +1180,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread*
assert
(
stub_frame
.
is_runtime_frame
(),
"sanity check"
);
frame
caller_frame
=
stub_frame
.
sender
(
&
reg_map
);
assert
(
!
caller_frame
.
is_interpreted_frame
()
&&
!
caller_frame
.
is_entry_frame
(),
"unexpected frame"
);
assert
(
!
caller_frame
.
is_ricochet_frame
(),
"unexpected frame"
);
#endif
/* ASSERT */
methodHandle
callee_method
;
...
...
@@ -1222,6 +1229,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
if
(
caller_frame
.
is_interpreted_frame
()
||
caller_frame
.
is_entry_frame
()
||
caller_frame
.
is_ricochet_frame
()
||
is_mh_invoke_via_adapter
)
{
methodOop
callee
=
thread
->
callee_target
();
guarantee
(
callee
!=
NULL
&&
callee
->
is_method
(),
"bad handshake"
);
...
...
src/share/vm/runtime/sharedRuntime.hpp
浏览文件 @
2b587c38
...
...
@@ -58,6 +58,8 @@ class SharedRuntime: AllStatic {
static
RuntimeStub
*
_resolve_virtual_call_blob
;
static
RuntimeStub
*
_resolve_static_call_blob
;
static
RicochetBlob
*
_ricochet_blob
;
static
SafepointBlob
*
_polling_page_safepoint_handler_blob
;
static
SafepointBlob
*
_polling_page_return_handler_blob
;
#ifdef COMPILER2
...
...
@@ -213,6 +215,16 @@ class SharedRuntime: AllStatic {
return
_resolve_static_call_blob
->
entry_point
();
}
// Accessor for the shared ricochet blob; may return NULL on platforms
// that have not generated one.
static RicochetBlob* ricochet_blob() {
#ifdef X86
  // Currently only implemented on x86
  assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops");
#endif
  return _ricochet_blob;
}
static
void
generate_ricochet_blob
();
// Accessor for the safepoint blob handling polling-page faults at returns.
static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; }
// Accessor for the safepoint blob handling ordinary polling-page faults.
static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; }
...
...
src/share/vm/services/heapDumper.cpp
浏览文件 @
2b587c38
...
...
@@ -1649,6 +1649,9 @@ int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
if
(
fr
->
is_entry_frame
())
{
last_entry_frame
=
fr
;
}
if
(
fr
->
is_ricochet_frame
())
{
fr
->
oops_ricochet_do
(
&
blk
,
vf
->
register_map
());
}
}
vf
=
vf
->
sender
();
}
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录