openanolis / dragonwell8_hotspot, commit 975dadf1
Commit 975dadf1, written on Sep 17, 2012 by twisti
7196262: JSR 292: java/lang/invoke/PrivateInvokeTest.java fails on solaris-sparc
Reviewed-by: kvn, jrose, bdelsart
Parent: f9c63c12
Showing 11 changed files with 290 additions and 481 deletions (+290 -481)
src/cpu/sparc/vm/assembler_sparc.cpp      +32   -24
src/cpu/sparc/vm/assembler_sparc.hpp       +0    -3
src/cpu/sparc/vm/methodHandles_sparc.cpp  +43   -48
src/cpu/sparc/vm/sharedRuntime_sparc.cpp +109  -311
src/cpu/x86/vm/methodHandles_x86.cpp      +18   -23
src/cpu/x86/vm/sharedRuntime_x86_32.cpp   +15   -21
src/cpu/x86/vm/sharedRuntime_x86_64.cpp   +15   -21
src/share/vm/asm/register.hpp             +16   -16
src/share/vm/code/nmethod.cpp              +3    -1
src/share/vm/runtime/sharedRuntime.cpp    +31    -7
src/share/vm/runtime/sharedRuntime.hpp     +8    -6
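Most of the churn follows one interface change: on x86 and SPARC, verify_oop_args() and gen_special_dispatch() now take the methodHandle itself and derive the argument count and intrinsic id from it, instead of receiving total_args_passed and a vmIntrinsics::ID separately, while the MemberName argument sanity checks move into a debug-only SharedRuntime helper. The sketch below only illustrates that direction with simplified stand-in types (MethodSketch and the unqualified helpers are illustrative, not the real HotSpot declarations); the actual signatures are in the hunks that follow.

    // Sketch only: simplified stand-ins for the reshaped platform helpers.
    struct MethodSketch {
      int params;                                        // what size_of_parameters() reports
      int iid;                                           // what intrinsic_id() reports
      int size_of_parameters() const { return params; }  // replaces the total_args_passed parameter
      int intrinsic_id() const       { return iid; }     // replaces the special_dispatch parameter
    };

    static void verify_oop_args(const MethodSketch* method) {
      // the real helper also takes masm, sig_bt and regs; the loop bound now comes from the method
      for (int i = 0; i < method->size_of_parameters(); i++) {
        // check each oop-typed argument, as before
      }
    }

    static void gen_special_dispatch(const MethodSketch* method) {
      verify_oop_args(method);
      int iid = method->intrinsic_id();                       // formerly passed in as 'special_dispatch'
      int member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
      (void)iid; (void)member_arg_pos;
      // ...then dispatch via MethodHandles::generate_method_handle_dispatch(masm, iid, ...)
    }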
src/cpu/sparc/vm/assembler_sparc.cpp
@@ -725,24 +725,6 @@ void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offs
 }
 
-// Convert to C varargs format
-void MacroAssembler::set_varargs( Argument inArg, Register d ) {
-  // spill register-resident args to their memory slots
-  // (SPARC calling convention requires callers to have already preallocated these)
-  // Note that the inArg might in fact be an outgoing argument,
-  // if a leaf routine or stub does some tricky argument shuffling.
-  // This routine must work even though one of the saved arguments
-  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
-  for (Argument savePtr = inArg;
-       savePtr.is_register();
-       savePtr = savePtr.successor()) {
-    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
-  }
-  // return the address of the first memory slot
-  Address a = inArg.address_in_frame();
-  add(a.base(), a.disp(), d);
-}
 
 // Conditional breakpoint (for assertion checks in assembly code)
 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
   trap(c, cc, G0, ST_RESERVED_FOR_USER_0);

@@ -2943,6 +2925,20 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
          "caller must use same register for non-constant itable index as for method");
 
+  Label L_no_such_interface_restore;
+  bool did_save = false;
+  if (scan_temp == noreg || sethi_temp == noreg) {
+    Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
+    Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
+    assert(method_result->is_global(), "must be able to return value");
+    scan_temp  = L2;
+    sethi_temp = L3;
+    save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
+    recv_klass = recv_2;
+    intf_klass = intf_2;
+    did_save = true;
+  }
+
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
   int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
   int scan_step   = itableOffsetEntry::size() * wordSize;

@@ -2981,7 +2977,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   //   result = (klass + scan->offset() + itable_index);
   // }
   // }
-  Label search, found_method;
+  Label L_search, L_found_method;
 
   for (int peel = 1; peel >= 0; peel--) {
     // %%%% Could load both offset and interface in one ldx, if they were

@@ -2991,23 +2987,23 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
     // Check that this entry is non-null.  A null entry means that
     // the receiver class doesn't implement the interface, and wasn't the
     // same as when the caller was compiled.
-    bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
+    bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
     delayed()->cmp(method_result, intf_klass);
 
     if (peel) {
-      brx(Assembler::equal,    false, Assembler::pt, found_method);
+      brx(Assembler::equal,    false, Assembler::pt, L_found_method);
     } else {
-      brx(Assembler::notEqual, false, Assembler::pn, search);
+      brx(Assembler::notEqual, false, Assembler::pn, L_search);
       // (invert the test to fall through to found_method...)
     }
     delayed()->add(scan_temp, scan_step, scan_temp);
 
     if (!peel)  break;
 
-    bind(search);
+    bind(L_search);
   }
 
-  bind(found_method);
+  bind(L_found_method);
 
   // Got a hit.
   int ito_offset = itableOffsetEntry::offset_offset_in_bytes();

@@ -3015,6 +3011,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   ito_offset -= scan_step;
   lduw(scan_temp, ito_offset, scan_temp);
   ld_ptr(recv_klass, scan_temp, method_result);
+
+  if (did_save) {
+    Label L_done;
+    ba(L_done);
+    delayed()->restore();
+
+    bind(L_no_such_interface_restore);
+    ba(L_no_such_interface);
+    delayed()->restore();
+
+    bind(L_done);
+  }
 }
src/cpu/sparc/vm/assembler_sparc.hpp
@@ -2428,9 +2428,6 @@ public:
   static void test();
 #endif
 
-  // convert an incoming arglist to varargs format; put the pointer in d
-  void set_varargs( Argument a, Register d );
-
   int  total_frame_size_in_bytes(int extraWords);
 
   // used when extraWords known statically
src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -121,6 +121,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
   assert(method == G5_method, "interpreter calling convention");
+  assert_different_registers(method, target, temp);
 
   if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
     Label run_compiled_code;

@@ -153,7 +154,7 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   BLOCK_COMMENT("jump_to_lambda_form {");
   // This is the initial entry point of a lazy method handle.
   // After type checking, it picks up the invoker from the LambdaForm.
-  assert_different_registers(recv, method_temp, temp2, temp3);
+  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
   assert(method_temp == G5_method, "required register for loading method");
 
   //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

@@ -165,7 +166,7 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
   __ verify_oop(method_temp);
   // the following assumes that a Method* is normally compressed in the vmtarget field:
-  __ ld_ptr(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);
+  __ ld_ptr(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack

@@ -303,25 +304,25 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                     Register member_reg,
                                                     bool for_compiler_entry) {
   assert(is_signature_polymorphic(iid), "expected invoke iid");
   // temps used in this code are not used in *either* compiled or interpreted calling sequences
   Register temp1 = (for_compiler_entry ? G1_scratch : O1);
-  Register temp2 = (for_compiler_entry ? G4_scratch : O4);
-  Register temp3 = G3_scratch;
-  Register temp4 = (for_compiler_entry ? noreg : O2);
+  Register temp2 = (for_compiler_entry ? G3_scratch : O2);
+  Register temp3 = (for_compiler_entry ? G4_scratch : O3);
+  Register temp4 = (for_compiler_entry ? noreg      : O4);
   if (for_compiler_entry) {
     assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
     assert_different_registers(temp1, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp2, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp3, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp4, O0, O1, O2, O3, O4, O5);
+  } else {
+    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
   }
   if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
   if (member_reg != noreg)    assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
-  if (!for_compiler_entry)    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
 
   if (iid == vmIntrinsics::_invokeBasic) {
     // indirect through MH.form.vmentry.vmtarget
-    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry);
+    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp1, temp2, for_compiler_entry);
 
   } else {
     // The method is a member invoker used by direct method handles.

@@ -378,24 +379,22 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     //  member_reg - MemberName that was the trailing argument
     //  temp1_recv_klass - klass of stacked receiver, if needed
     //  O5_savedSP - interpreter linkage (if interpreted)
-    //  O0..O7,G1,G4 - compiler arguments (if compiled)
+    //  O0..O5 - compiler arguments (if compiled)
 
-    bool method_is_live = false;
+    Label L_incompatible_class_change_error;
     switch (iid) {
     case vmIntrinsics::_linkToSpecial:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
       }
       __ ld_ptr(member_vmtarget, G5_method);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToStatic:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
       }
       __ ld_ptr(member_vmtarget, G5_method);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToVirtual:

@@ -404,7 +403,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // minus the CP setup and profiling:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
       }
 
       // pick out the vtable index from the MemberName, and then we can discard it:

@@ -423,7 +422,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // get target Method* & entry point
       __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
-      method_is_live = true;
       break;
     }

@@ -432,13 +430,13 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // same as TemplateTable::invokeinterface
       // (minus the CP setup and profiling, with different argument motion)
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
       }
 
-      Register temp3_intf = temp3;
-      __ load_heap_oop(member_clazz, temp3_intf);
-      load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
-      __ verify_klass_ptr(temp3_intf);
+      Register temp2_intf = temp2;
+      __ load_heap_oop(member_clazz, temp2_intf);
+      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
+      __ verify_klass_ptr(temp2_intf);
 
       Register G5_index = G5_method;
       __ ld_ptr(member_vmindex, G5_index);

@@ -450,37 +448,34 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       }
 
       // given intf, index, and recv klass, dispatch to the implementation method
-      Label L_no_such_interface;
-      Register no_sethi_temp = noreg;
-      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
+      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                  // note: next two args must be the same:
                                  G5_index, G5_method,
-                                 temp2, no_sethi_temp,
-                                 L_no_such_interface);
-
-      __ verify_method_ptr(G5_method);
-      jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry);
-
-      __ bind(L_no_such_interface);
-      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
-      __ jump_to(icce, temp3);
-      __ delayed()->nop();
+                                 temp3, temp4,
+                                 L_incompatible_class_change_error);
       break;
     }
 
   default:
-    fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
     break;
   }
 
-  if (method_is_live) {
-    // live at this point: G5_method, O5_savedSP (if interpreted)
+  // Live at this point:
+  //   G5_method
+  //   O5_savedSP (if interpreted)
 
   // After figuring out which concrete method to call, jump into it.
   // Note that this works in the interpreter with no data motion.
   // But the compiled version will require that rcx_recv be shifted out.
   __ verify_method_ptr(G5_method);
-  jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry);
-  }
+  jump_from_method_handle(_masm, G5_method, temp1, temp2, for_compiler_entry);
+
+  if (iid == vmIntrinsics::_linkToInterface) {
+    __ BIND(L_incompatible_class_change_error);
+    AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
+    __ jump_to(icce, temp1);
+    __ delayed()->nop();
+  }
 }
src/cpu/sparc/vm/sharedRuntime_sparc.cpp
(This diff is collapsed in the original view and is not reproduced here; +109 -311.)
src/cpu/x86/vm/methodHandles_x86.cpp
@@ -327,10 +327,11 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     assert_different_registers(temp3, rcx, rdx);
   }
 #endif
   else {
     assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
   }
   assert_different_registers(temp1, temp2, temp3, receiver_reg);
   assert_different_registers(temp1, temp2, temp3, member_reg);
+  if (!for_compiler_entry)  assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
 
   if (iid == vmIntrinsics::_invokeBasic) {
     // indirect through MH.form.vmentry.vmtarget

@@ -392,14 +393,13 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     //  rsi/r13 - interpreter linkage (if interpreted)
     //  rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled)
 
-    bool method_is_live = false;
+    Label L_incompatible_class_change_error;
     switch (iid) {
     case vmIntrinsics::_linkToSpecial:
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
       __ movptr(rbx_method, member_vmtarget);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToStatic:

@@ -407,7 +407,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
       __ movptr(rbx_method, member_vmtarget);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToVirtual:

@@ -436,7 +435,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // get target Method* & entry point
       __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
-      method_is_live = true;
       break;
     }

@@ -464,35 +462,32 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       }
 
      // given intf, index, and recv klass, dispatch to the implementation method
-      Label L_no_such_interface;
       __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                  // note: next two args must be the same:
                                  rbx_index, rbx_method,
                                  temp2,
-                                 L_no_such_interface);
-
-      __ verify_method_ptr(rbx_method);
-      jump_from_method_handle(_masm, rbx_method, temp2, for_compiler_entry);
-      __ hlt();
-
-      __ bind(L_no_such_interface);
-      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+                                 L_incompatible_class_change_error);
       break;
     }
 
   default:
-    fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
     break;
   }
 
-  if (method_is_live) {
-    // live at this point: rbx_method, rsi/r13 (if interpreted)
+  // Live at this point:
+  //   rbx_method
+  //   rsi/r13 (if interpreted)
 
   // After figuring out which concrete method to call, jump into it.
   // Note that this works in the interpreter with no data motion.
   // But the compiled version will require that rcx_recv be shifted out.
   __ verify_method_ptr(rbx_method);
   jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
-  }
+
+  if (iid == vmIntrinsics::_linkToInterface) {
+    __ bind(L_incompatible_class_change_error);
+    __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+  }
 }
src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -1346,12 +1346,12 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
 }
 
 static void verify_oop_args(MacroAssembler* masm,
-                            int total_args_passed,
+                            methodHandle method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
   if (VerifyOops) {
-    for (int i = 0; i < total_args_passed; i++) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         VMReg r = regs[i].first();

@@ -1368,35 +1368,32 @@ static void verify_oop_args(MacroAssembler* masm,
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 int total_args_passed,
-                                 int comp_args_on_stack,
-                                 vmIntrinsics::ID special_dispatch,
+                                 methodHandle method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
-  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
 
   // Now write the args into the outgoing interpreter space
   bool has_receiver = false;
   Register receiver_reg = noreg;
   int member_arg_pos = -1;
   Register member_reg = noreg;
-  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
   if (ref_kind != 0) {
-    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
     member_reg = rbx;  // known to be free at this point
     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
   } else {
-    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
   }
 
   if (member_reg != noreg) {
     // Load the member_arg into register, if necessary.
-    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
     VMReg r = regs[member_arg_pos].first();
     assert(r->is_valid(), "bad member arg");
     if (r->is_stack()) {
       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {

@@ -1407,7 +1404,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   if (has_receiver) {
     // Make sure the receiver is loaded into a register.
-    assert(total_args_passed > 0, "oob");
+    assert(method->size_of_parameters() > 0, "oob");
     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
     VMReg r = regs[0].first();
     assert(r->is_valid(), "bad receiver arg");

@@ -1415,7 +1412,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
       // Porting note:  This assumes that compiled calling conventions always
       // pass the receiver oop in a register.  If this is not true on some
       // platform, pick a temp and load the receiver from stack.
-      assert(false, "receiver always in a register");
+      fatal("receiver always in a register");
       receiver_reg = rcx;  // known to be free at this point
       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {

@@ -1425,7 +1422,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   }
 
   // Figure out which address we are really jumping to:
-  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }

@@ -1461,8 +1458,6 @@ static void gen_special_dispatch(MacroAssembler* masm,
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_in_args,
-                                                int comp_args_on_stack,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
                                                 BasicType ret_type) {

@@ -1471,9 +1466,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     intptr_t start = (intptr_t)__ pc();
     int vep_offset = ((intptr_t)__ pc()) - start;
     gen_special_dispatch(masm,
-                         total_in_args,
-                         comp_args_on_stack,
-                         method->intrinsic_id(),
+                         method,
                          in_sig_bt,
                          in_regs);
     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period

@@ -1506,6 +1499,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
+  const int total_in_args = method->size_of_parameters();
   int total_c_args = total_in_args;
   if (!is_critical_native) {
     total_c_args += 1;
src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -1593,12 +1593,12 @@ class ComputeMoveOrder: public StackObj {
 };
 
 static void verify_oop_args(MacroAssembler* masm,
-                            int total_args_passed,
+                            methodHandle method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
   if (VerifyOops) {
-    for (int i = 0; i < total_args_passed; i++) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         VMReg r = regs[i].first();

@@ -1615,35 +1615,32 @@ static void verify_oop_args(MacroAssembler* masm,
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 int total_args_passed,
-                                 int comp_args_on_stack,
-                                 vmIntrinsics::ID special_dispatch,
+                                 methodHandle method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
-  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
 
   // Now write the args into the outgoing interpreter space
   bool has_receiver = false;
   Register receiver_reg = noreg;
   int member_arg_pos = -1;
   Register member_reg = noreg;
-  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
   if (ref_kind != 0) {
-    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
     member_reg = rbx;  // known to be free at this point
     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
   } else {
-    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
   }
 
   if (member_reg != noreg) {
     // Load the member_arg into register, if necessary.
-    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
     VMReg r = regs[member_arg_pos].first();
     assert(r->is_valid(), "bad member arg");
     if (r->is_stack()) {
       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {

@@ -1654,7 +1651,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   if (has_receiver) {
     // Make sure the receiver is loaded into a register.
-    assert(total_args_passed > 0, "oob");
+    assert(method->size_of_parameters() > 0, "oob");
     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
     VMReg r = regs[0].first();
     assert(r->is_valid(), "bad receiver arg");

@@ -1662,7 +1659,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
-      assert(false, "receiver always in a register");
+      fatal("receiver always in a register");
       receiver_reg = j_rarg0;  // known to be free at this point
       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {

@@ -1672,7 +1669,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   }
 
   // Figure out which address we are really jumping to:
-  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }

@@ -1708,8 +1705,6 @@ static void gen_special_dispatch(MacroAssembler* masm,
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_in_args,
-                                                int comp_args_on_stack,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
                                                 BasicType ret_type) {

@@ -1718,9 +1713,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     intptr_t start = (intptr_t)__ pc();
     int vep_offset = ((intptr_t)__ pc()) - start;
     gen_special_dispatch(masm,
-                         total_in_args,
-                         comp_args_on_stack,
-                         method->intrinsic_id(),
+                         method,
                          in_sig_bt,
                          in_regs);
     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period

@@ -1754,6 +1747,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
+  const int total_in_args = method->size_of_parameters();
   int total_c_args = total_in_args;
   if (!is_critical_native) {
     total_c_args += 1;
src/share/vm/asm/register.hpp
@@ -103,7 +103,7 @@ inline void assert_different_registers(
 ) {
   assert(
     a != b,
-    err_msg("registers must be different: a=%d, b=%d",
+    err_msg_res("registers must be different: a=%d, b=%d",
             a, b)
   );
 }

@@ -117,7 +117,7 @@ inline void assert_different_registers(
   assert(
     a != b && a != c
            && b != c,
-    err_msg("registers must be different: a=%d, b=%d, c=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d",
            a, b, c)
   );
 }

@@ -133,7 +133,7 @@ inline void assert_different_registers(
     a != b && a != c && a != d
            && b != c && b != d
                      && c != d,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d",
            a, b, c, d)
   );
 }

@@ -151,7 +151,7 @@ inline void assert_different_registers(
            && b != c && b != d && b != e
                      && c != d && c != e
                                && d != e,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d",
            a, b, c, d, e)
   );
 }

@@ -171,7 +171,7 @@ inline void assert_different_registers(
                      && c != d && c != e && c != f
                                && d != e && d != f
                                          && e != f,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d",
            a, b, c, d, e, f)
   );
 }

@@ -193,7 +193,7 @@ inline void assert_different_registers(
                                && d != e && d != f && d != g
                                          && e != f && e != g
                                                    && f != g,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d",
            a, b, c, d, e, f, g)
   );
 }

@@ -217,7 +217,7 @@ inline void assert_different_registers(
            && e != f && e != g && e != h
                      && f != g && f != h
                                && g != h,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d",
            a, b, c, d, e, f, g, h)
   );
 }

@@ -243,7 +243,7 @@ inline void assert_different_registers(
                      && f != g && f != h && f != i
                                && g != h && g != i
                                          && h != i,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d",
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d",
            a, b, c, d, e, f, g, h, i)
   );
 }
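register.hpp switches every assert_different_registers() message from err_msg to err_msg_res, the same substitution made in the fatal() calls elsewhere in this commit. My reading, which is not stated in the patch itself, is that the _res variant formats its message into resource-area storage rather than a short-lived stack temporary. The sketch below shows that general idea with hypothetical names (ResourceArenaSketch, format_in_arena); it is not HotSpot's actual FormatBuffer machinery.

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for HotSpot's resource area: a bump allocator whose
    // storage outlives the formatted message (illustrative only, no overflow handling).
    struct ResourceArenaSketch {
      char   storage[4096];
      size_t used = 0;
      char*  alloc(size_t n) { char* p = storage + used; used += n; return p; }
    };

    // Format a message into arena-owned storage, so callers can keep the pointer
    // for as long as the arena lives; that is the rough idea behind an err_msg_res
    // style helper, as opposed to formatting into a stack temporary.
    static const char* format_in_arena(ResourceArenaSketch* arena, const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      int n = std::vsnprintf(nullptr, 0, fmt, ap);  // measure required length
      va_end(ap);
      char* buf = arena->alloc(static_cast<size_t>(n) + 1);
      va_start(ap, fmt);
      std::vsnprintf(buf, static_cast<size_t>(n) + 1, fmt, ap);
      va_end(ap);
      return buf;
    }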
src/share/vm/code/nmethod.cpp
@@ -700,8 +700,10 @@ nmethod::nmethod(
     // then print the requested information
     if (PrintNativeNMethods) {
       print_code();
+      if (oop_maps != NULL) {
         oop_maps->print();
+      }
     }
     if (PrintRelocations) {
       print_relocations();
     }
src/share/vm/runtime/sharedRuntime.cpp
@@ -1618,6 +1618,31 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
   return callee_method;
 }
 
+#ifdef ASSERT
+void SharedRuntime::check_member_name_argument_is_last_argument(methodHandle method,
+                                                                const BasicType* sig_bt,
+                                                                const VMRegPair* regs) {
+  ResourceMark rm;
+  const int total_args_passed = method->size_of_parameters();
+  const VMRegPair* regs_with_member_name = regs;
+  VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
+
+  const int member_arg_pos = total_args_passed - 1;
+  assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
+  assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+
+  const bool is_outgoing = method->is_method_handle_intrinsic();
+  int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1, is_outgoing);
+
+  for (int i = 0; i < member_arg_pos; i++) {
+    VMReg a = regs_with_member_name[i].first();
+    VMReg b = regs_without_member_name[i].first();
+    assert(a->value() == b->value(), err_msg_res("register allocation mismatch: a=%d, b=%d", a->value(), b->value()));
+  }
+  assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
+}
+#endif
+
 // ---------------------------------------------------------------------------
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race

@@ -2546,10 +2571,10 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
       MacroAssembler _masm(&buffer);
 
       // Fill in the signature array, for the calling-convention call.
-      int total_args_passed = method->size_of_parameters();
+      const int total_args_passed = method->size_of_parameters();
 
-      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
-      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
+      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
+      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
       int i = 0;
       if (!method->is_static())  // Pass in receiver first
         sig_bt[i++] = T_OBJECT;

@@ -2559,7 +2584,7 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
         if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
       }
-      assert(i == total_args_passed, "");
+      assert(i == total_args_passed, "");
       BasicType ret_type = ss.type();
 
       // Now get the compiled-Java layout as input (or output) arguments.

@@ -2572,9 +2597,8 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id,
-                                                  total_args_passed, comp_args_on_stack,
-                                                  sig_bt, regs,
+                                                  sig_bt, regs,
                                                   ret_type);
     }
   }
src/share/vm/runtime/sharedRuntime.hpp
@@ -345,7 +345,11 @@ class SharedRuntime: AllStatic {
   // the bottom of the frame the first 16 words will be skipped and SharedInfo::stack0
   // will be just above it. (
   // return value is the maximum number of VMReg stack slots the convention will use.
-  static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs,
-                                     int total_args_passed, int is_outgoing);
+  static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs,
+                                     int total_args_passed, int is_outgoing);
+
+  static void check_member_name_argument_is_last_argument(methodHandle method,
+                                                          const BasicType* sig_bt,
+                                                          const VMRegPair* regs) NOT_DEBUG_RETURN;
 
   // Ditto except for calling C
   static int c_calling_convention(const BasicType* sig_bt, VMRegPair* regs,
                                   int total_args_passed);

@@ -425,13 +429,11 @@ class SharedRuntime: AllStatic {
   // The wrapper may contain special-case code if the given method
   // is a JNI critical method, or a compiled method handle adapter,
   // such as _invokeBasic, _linkToVirtual, etc.
-  static nmethod* generate_native_wrapper(MacroAssembler* masm,
+  static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                           methodHandle method,
                                           int compile_id,
-                                          int total_args_passed,
-                                          int max_arg,
-                                          BasicType* sig_bt, VMRegPair* regs,
+                                          BasicType* sig_bt, VMRegPair* regs,
                                           BasicType ret_type);
 
   // Block before entering a JNI critical method
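The new check_member_name_argument_is_last_argument() declaration ends in NOT_DEBUG_RETURN, pairing with the #ifdef ASSERT definition added to sharedRuntime.cpp above: debug builds get the real checking code, product builds get an empty inline body. A minimal sketch of that idiom follows; the macro expansion shown is my assumption of its shape (the real macro lives in HotSpot's utilities/macros.hpp), and CheckerSketch is an illustrative stand-in rather than any actual HotSpot class.

    // Sketch of the debug-only-definition idiom, under the assumption stated above.
    #ifdef ASSERT
      #define NOT_DEBUG_RETURN        // debug build: plain declaration; the body lives in the .cpp
    #else
      #define NOT_DEBUG_RETURN {}     // product build: empty inline body, calls compile to nothing
    #endif

    class CheckerSketch {
     public:
      static void check_something(int arg) NOT_DEBUG_RETURN;
    };

    #ifdef ASSERT
    // Debug-only definition, mirroring the #ifdef ASSERT block added in sharedRuntime.cpp,
    // where the real helper re-runs the calling convention and compares register assignments.
    void CheckerSketch::check_something(int arg) {
      (void)arg;  // real checking work would go here
    }
    #endif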