Commit 975dadf1, authored Sep 17, 2012 by twisti
7196262: JSR 292: java/lang/invoke/PrivateInvokeTest.java fails on solaris-sparc
Reviewed-by: kvn, jrose, bdelsart
Parent: f9c63c12
Showing 11 changed files with 290 additions and 481 deletions (+290 −481)
src/cpu/sparc/vm/assembler_sparc.cpp        +32  −24
src/cpu/sparc/vm/assembler_sparc.hpp         +0   −3
src/cpu/sparc/vm/methodHandles_sparc.cpp    +43  −48
src/cpu/sparc/vm/sharedRuntime_sparc.cpp   +109 −311
src/cpu/x86/vm/methodHandles_x86.cpp        +18  −23
src/cpu/x86/vm/sharedRuntime_x86_32.cpp     +15  −21
src/cpu/x86/vm/sharedRuntime_x86_64.cpp     +15  −21
src/share/vm/asm/register.hpp               +16  −16
src/share/vm/code/nmethod.cpp                +3   −1
src/share/vm/runtime/sharedRuntime.cpp      +31   −7
src/share/vm/runtime/sharedRuntime.hpp       +8   −6
src/cpu/sparc/vm/assembler_sparc.cpp
...
...
@@ -725,24 +725,6 @@ void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offs
 }
 
-// Convert to C varargs format
-void MacroAssembler::set_varargs( Argument inArg, Register d ) {
-  // spill register-resident args to their memory slots
-  // (SPARC calling convention requires callers to have already preallocated these)
-  // Note that the inArg might in fact be an outgoing argument,
-  // if a leaf routine or stub does some tricky argument shuffling.
-  // This routine must work even though one of the saved arguments
-  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
-  for (Argument savePtr = inArg; savePtr.is_register(); savePtr = savePtr.successor()) {
-    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
-  }
-  // return the address of the first memory slot
-  Address a = inArg.address_in_frame();
-  add(a.base(), a.disp(), d);
-}
-
 // Conditional breakpoint (for assertion checks in assembly code)
 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
   trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
...
...
@@ -2943,6 +2925,20 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
          "caller must use same register for non-constant itable index as for method");
 
+  Label L_no_such_interface_restore;
+  bool did_save = false;
+  if (scan_temp == noreg || sethi_temp == noreg) {
+    Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
+    Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
+    assert(method_result->is_global(), "must be able to return value");
+    scan_temp  = L2;
+    sethi_temp = L3;
+    save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
+    recv_klass = recv_2;
+    intf_klass = intf_2;
+    did_save = true;
+  }
+
   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
   int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
   int scan_step   = itableOffsetEntry::size() * wordSize;
...
...
@@ -2981,7 +2977,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   //   result = (klass + scan->offset() + itable_index);
   // }
   // }
-  Label search, found_method;
+  Label L_search, L_found_method;
 
   for (int peel = 1; peel >= 0; peel--) {
     // %%%% Could load both offset and interface in one ldx, if they were
...
...
@@ -2991,23 +2987,23 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
     // Check that this entry is non-null.  A null entry means that
     // the receiver class doesn't implement the interface, and wasn't the
     // same as when the caller was compiled.
-    bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
+    bpr(Assembler::rc_z, false, Assembler::pn, method_result,
+        did_save ? L_no_such_interface_restore : L_no_such_interface);
     delayed()->cmp(method_result, intf_klass);
 
     if (peel) {
-      brx(Assembler::equal,    false, Assembler::pt, found_method);
+      brx(Assembler::equal,    false, Assembler::pt, L_found_method);
     } else {
-      brx(Assembler::notEqual, false, Assembler::pn, search);
+      brx(Assembler::notEqual, false, Assembler::pn, L_search);
       // (invert the test to fall through to found_method...)
     }
     delayed()->add(scan_temp, scan_step, scan_temp);
 
     if (!peel)  break;
 
-    bind(search);
+    bind(L_search);
   }
 
-  bind(found_method);
+  bind(L_found_method);
 
   // Got a hit.
   int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
...
...
@@ -3015,6 +3011,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   ito_offset -= scan_step;
   lduw(scan_temp, ito_offset, scan_temp);
   ld_ptr(recv_klass, scan_temp, method_result);
 
+  if (did_save) {
+    Label L_done;
+    ba(L_done);
+    delayed()->restore();
+
+    bind(L_no_such_interface_restore);
+    ba(L_no_such_interface);
+    delayed()->restore();
+
+    bind(L_done);
+  }
 }
...
...
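The pseudocode comment in the hunk above ("result = (klass + scan->offset() + itable_index)") is the whole algorithm; the generated assembly only adds the register-saving detour. As a minimal standalone C++ sketch of the same itable scan, using hypothetical simplified structures rather than HotSpot's real itableOffsetEntry/itableMethodEntry types:

    #include <cstdint>

    // Hypothetical layout: one (interface, offset) pair per implemented
    // interface, terminated by a null interface (the L_no_such_interface case).
    struct ItableOffsetEntry { const void* interface_klass; int32_t offset; };

    const void* lookup_interface_method(const char* recv_klass,         // receiver's klass base address
                                        const ItableOffsetEntry* scan,  // start of its itable
                                        const void* intf_klass,         // interface we dispatch through
                                        int itable_index) {             // method slot within that interface
      for (; scan->interface_klass != nullptr; scan++) {
        if (scan->interface_klass == intf_klass) {
          // scan->offset locates this interface's method block inside the klass
          const void* const* methods =
              reinterpret_cast<const void* const*>(recv_klass + scan->offset);
          return methods[itable_index];
        }
      }
      return nullptr;  // receiver does not implement the interface -> ICCE path
    }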
src/cpu/sparc/vm/assembler_sparc.hpp
...
...
@@ -2428,9 +2428,6 @@ public:
   static void test();
 #endif
 
-  // convert an incoming arglist to varargs format; put the pointer in d
-  void set_varargs( Argument a, Register d );
-
   int total_frame_size_in_bytes(int extraWords);
 
   // used when extraWords known statically
...
...
src/cpu/sparc/vm/methodHandles_sparc.cpp
...
...
@@ -121,6 +121,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
   assert(method == G5_method, "interpreter calling convention");
+  assert_different_registers(method, target, temp);
 
   if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
     Label run_compiled_code;
...
...
@@ -153,19 +154,19 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   BLOCK_COMMENT("jump_to_lambda_form {");
   // This is the initial entry point of a lazy method handle.
   // After type checking, it picks up the invoker from the LambdaForm.
-  assert_different_registers(recv, method_temp, temp2, temp3);
+  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
   assert(method_temp == G5_method, "required register for loading method");
 
   //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });
 
   // Load the invoker, as MH -> MH.form -> LF.vmentry
   __ verify_oop(recv);
   __ load_heap_oop(Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), method_temp);
   __ verify_oop(method_temp);
   __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
   __ verify_oop(method_temp);
   // the following assumes that a Method* is normally compressed in the vmtarget field:
   __ ld_ptr(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
...
...
@@ -303,25 +304,25 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                     Register member_reg,
                                                     bool for_compiler_entry) {
   assert(is_signature_polymorphic(iid), "expected invoke iid");
   // temps used in this code are not used in *either* compiled or interpreted calling sequences
   Register temp1 = (for_compiler_entry ? G1_scratch : O1);
-  Register temp2 = (for_compiler_entry ? G4_scratch : O4);
-  Register temp3 = G3_scratch;
-  Register temp4 = (for_compiler_entry ? noreg : O2);
+  Register temp2 = (for_compiler_entry ? G3_scratch : O2);
+  Register temp3 = (for_compiler_entry ? G4_scratch : O3);
+  Register temp4 = (for_compiler_entry ? noreg : O4);
   if (for_compiler_entry) {
     assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
     assert_different_registers(temp1, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp2, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp3, O0, O1, O2, O3, O4, O5);
     assert_different_registers(temp4, O0, O1, O2, O3, O4, O5);
   }
+  else {
+    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
+  }
   if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
   if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
-  if (!for_compiler_entry)    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
 
   if (iid == vmIntrinsics::_invokeBasic) {
     // indirect through MH.form.vmentry.vmtarget
-    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry);
+    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp1, temp2, for_compiler_entry);
 
   } else {
     // The method is a member invoker used by direct method handles.
...
...
@@ -378,24 +379,22 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     //  member_reg - MemberName that was the trailing argument
     //  temp1_recv_klass - klass of stacked receiver, if needed
     //  O5_savedSP - interpreter linkage (if interpreted)
-    //  O0..O7,G1,G4 - compiler arguments (if compiled)
+    //  O0..O5 - compiler arguments (if compiled)
 
-    bool method_is_live = false;
+    Label L_incompatible_class_change_error;
     switch (iid) {
     case vmIntrinsics::_linkToSpecial:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
       }
       __ ld_ptr(member_vmtarget, G5_method);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToStatic:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
       }
       __ ld_ptr(member_vmtarget, G5_method);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToVirtual:
...
...
@@ -404,7 +403,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // minus the CP setup and profiling:
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
       }
 
       // pick out the vtable index from the MemberName, and then we can discard it:
...
...
@@ -423,7 +422,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// get target Method* & entry point
__
lookup_virtual_method
(
temp1_recv_klass
,
temp2_index
,
G5_method
);
method_is_live
=
true
;
break
;
}
...
...
@@ -432,13 +430,13 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       // same as TemplateTable::invokeinterface
       // (minus the CP setup and profiling, with different argument motion)
       if (VerifyMethodHandles) {
-        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
+        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
       }
 
-      Register temp3_intf = temp3;
-      __ load_heap_oop(member_clazz, temp3_intf);
-      load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
-      __ verify_klass_ptr(temp3_intf);
+      Register temp2_intf = temp2;
+      __ load_heap_oop(member_clazz, temp2_intf);
+      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
+      __ verify_klass_ptr(temp2_intf);
 
       Register G5_index = G5_method;
       __ ld_ptr(member_vmindex, G5_index);
...
...
@@ -450,37 +448,34 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       }
 
       // given intf, index, and recv klass, dispatch to the implementation method
-      Label L_no_such_interface;
-      Register no_sethi_temp = noreg;
-      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
+      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                  // note: next two args must be the same:
                                  G5_index, G5_method,
-                                 temp2, no_sethi_temp,
-                                 L_no_such_interface);
-
-      __ verify_method_ptr(G5_method);
-      jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry);
-
-      __ bind(L_no_such_interface);
-      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
-      __ jump_to(icce, temp3);
-      __ delayed()->nop();
+                                 temp3, temp4,
+                                 L_incompatible_class_change_error);
       break;
     }
 
     default:
-      fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
       break;
     }
 
-    if (method_is_live) {
-      // live at this point:  G5_method, O5_savedSP (if interpreted)
-
-      // After figuring out which concrete method to call, jump into it.
-      // Note that this works in the interpreter with no data motion.
-      // But the compiled version will require that rcx_recv be shifted out.
-      __ verify_method_ptr(G5_method);
-      jump_from_method_handle(_masm, G5_method, temp1, temp2, for_compiler_entry);
-    }
+    // Live at this point:
+    //   G5_method
+    //   O5_savedSP (if interpreted)
+
+    // After figuring out which concrete method to call, jump into it.
+    // Note that this works in the interpreter with no data motion.
+    // But the compiled version will require that rcx_recv be shifted out.
+    __ verify_method_ptr(G5_method);
+    jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry);
+
+    if (iid == vmIntrinsics::_linkToInterface) {
+      __ BIND(L_incompatible_class_change_error);
+      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
+      __ jump_to(icce, temp1);
+      __ delayed()->nop();
+    }
   }
 }
...
...
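The structural change running through this file (and its x86 twin below): the per-case method_is_live flag is gone, every linkTo* case now falls through to one shared jump, and only _linkToInterface can reach the IncompatibleClassChangeError stub, via the new shared label. A runnable toy of that control flow, plain C++ rather than VM code:

    #include <cstdio>

    enum IID { LinkToSpecial, LinkToStatic, LinkToVirtual, LinkToInterface };

    void dispatch(IID iid, bool implements_interface) {
      switch (iid) {
        case LinkToSpecial:
        case LinkToStatic:   puts("load vmtarget");  break;
        case LinkToVirtual:  puts("vtable lookup");  break;
        case LinkToInterface:
          if (!implements_interface) {   // the L_incompatible_class_change_error exit
            puts("jump to ICCE stub");
            return;
          }
          puts("itable lookup");
          break;
      }
      puts("jump into selected method");  // common exit; no method_is_live flag needed
    }

    int main() {
      dispatch(LinkToVirtual, true);
      dispatch(LinkToInterface, false);
      return 0;
    }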
src/cpu/sparc/vm/sharedRuntime_sparc.cpp
...
...
@@ -364,9 +364,9 @@ static VMRegPair reg64_to_VMRegPair(Register r) {
 // ---------------------------------------------------------------------------
 // The compiled Java calling convention.  The Java convention always passes
 // 64-bit values in adjacent aligned locations (either registers or stack),
-// floats in float registers and doubles in aligned float pairs.  Values are
-// packed in the registers.  There is no backing varargs store for values in
-// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
+// floats in float registers and doubles in aligned float pairs.  There is
+// no backing varargs store for values in registers.
+// In the 32-bit build, longs are passed on the stack (cannot be
 // passed in I's, because longs in I's get their heads chopped off at
 // interrupt).
 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
...
...
@@ -375,76 +375,13 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                            int is_outgoing) {
   assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
 
   // Convention is to pack the first 6 int/oop args into the first 6 registers
   // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
   // into F0-F7, extras spill to the stack.  Then pad all register sets to
   // align.  Then put longs and doubles into the same registers as they fit,
   // else spill to the stack.
   const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
   const int flt_reg_max = 8;
-  //
-  // Where 32-bit 1-reg longs start being passed
-  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
-  // So make it look like we've filled all the G regs that c2 wants to use.
-  Register g_reg = TieredCompilation ? noreg : G1;
-
-  // Count int/oop and float args.  See how many stack slots we'll need and
-  // where the longs & doubles will go.
-  int int_reg_cnt   = 0;
-  int flt_reg_cnt   = 0;
-  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
-  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
-  int stk_reg_pairs = 0;
-  for (int i = 0; i < total_args_passed; i++) {
-    switch (sig_bt[i]) {
-    case T_LONG:                // LP64, longs compete with int args
-      assert(sig_bt[i+1] == T_VOID, "");
-#ifdef _LP64
-      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
-#endif
-      break;
-    case T_OBJECT:
-    case T_ARRAY:
-    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
-      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
-#ifndef _LP64
-      else                            stk_reg_pairs++;
-#endif
-      break;
-    case T_INT:
-    case T_SHORT:
-    case T_CHAR:
-    case T_BYTE:
-    case T_BOOLEAN:
-      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
-      else                            stk_reg_pairs++;
-      break;
-    case T_FLOAT:
-      if (flt_reg_cnt < flt_reg_max)  flt_reg_cnt++;
-      else                            stk_reg_pairs++;
-      break;
-    case T_DOUBLE:
-      assert(sig_bt[i+1] == T_VOID, "");
-      break;
-    case T_VOID:
-      break;
-    default:
-      ShouldNotReachHere();
-    }
-  }
-
-  // This is where the longs/doubles start on the stack.
-  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round
-
-  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;
-
-  // int stk_reg = frame::register_save_words*(wordSize>>2);
-  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
-  int stk_reg = 0;
-  int int_reg = 0;
-  int flt_reg = 0;
+
+  int int_reg = 0;
+  int flt_reg = 0;
+  int slot    = 0;
 
   // Now do the signature layout
   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
     case T_INT:
...
...
@@ -461,11 +398,14 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
         regs[i].set1(r->as_VMReg());
       } else {
-        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
+        regs[i].set1(VMRegImpl::stack2reg(slot++));
       }
       break;
 
 #ifdef _LP64
+    case T_LONG:
+      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
+      // fall-through
     case T_OBJECT:
     case T_ARRAY:
     case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
...
...
@@ -473,78 +413,57 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
         regs[i].set2(r->as_VMReg());
       } else {
-        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
-        stk_reg_pairs += 2;
+        slot = round_to(slot, 2);  // align
+        regs[i].set2(VMRegImpl::stack2reg(slot));
+        slot += 2;
       }
       break;
-#endif // _LP64
+#else
     case T_LONG:
       assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
-#ifdef _LP64
-      if (int_reg < int_reg_max) {
-        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
-        regs[i].set2(r->as_VMReg());
-      } else {
-        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
-        stk_reg_pairs += 2;
-      }
-#else
-#ifdef COMPILER2
-      // For 32-bit build, can't pass longs in O-regs because they become
-      // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
-      // spare and available.  This convention isn't used by the Sparc ABI or
-      // anywhere else. If we're tiered then we don't use G-regs because c1
-      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
-      //  G0: zero
-      //  G1: 1st Long arg
-      //  G2: global allocated to TLS
-      //  G3: used in inline cache check
-      //  G4: 2nd Long arg
-      //  G5: used in inline cache check
-      //  G6: used by OS
-      //  G7: used by OS
-      if (g_reg == G1) {
-        regs[i].set2(G1->as_VMReg()); // This long arg in G1
-        g_reg = G4;                   // Where the next arg goes
-      } else if (g_reg == G4) {
-        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
-        g_reg = noreg;                // No more longs in registers
-      } else {
-        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
-        stk_reg_pairs += 2;
-      }
-#else // COMPILER2
-      regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
-      stk_reg_pairs += 2;
-#endif // COMPILER2
-#endif // _LP64
+      // On 32-bit SPARC put longs always on the stack to keep the pressure off
+      // integer argument registers.  They should be used for oops.
+      slot = round_to(slot, 2);  // align
+      regs[i].set2(VMRegImpl::stack2reg(slot));
+      slot += 2;
+#endif
       break;
 
     case T_FLOAT:
-      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
-      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
+      if (flt_reg < flt_reg_max) {
+        FloatRegister r = as_FloatRegister(flt_reg++);
+        regs[i].set1(r->as_VMReg());
+      } else {
+        regs[i].set1(VMRegImpl::stack2reg(slot++));
+      }
       break;
 
     case T_DOUBLE:
       assert(sig_bt[i+1] == T_VOID, "expecting half");
-      if (flt_reg_pairs + 1 < flt_reg_max) {
-        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
-        flt_reg_pairs += 2;
+      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
+        flt_reg = round_to(flt_reg, 2);  // align
+        FloatRegister r = as_FloatRegister(flt_reg);
+        regs[i].set2(r->as_VMReg());
+        flt_reg += 2;
       } else {
-        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
-        stk_reg_pairs += 2;
+        slot = round_to(slot, 2);  // align
+        regs[i].set2(VMRegImpl::stack2reg(slot));
+        slot += 2;
       }
       break;
 
-    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
+    case T_VOID:
+      regs[i].set_bad();   // Halves of longs & doubles
+      break;
 
     default:
-      ShouldNotReachHere();
+      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
+      break;
     }
   }
 
   // return the amount of stack space these arguments will need.
-  return stk_reg_pairs;
+  return slot;
 }
// Helper class mostly to avoid passing masm everywhere, and handle
...
...
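The rewritten java_calling_convention above replaces the old two-pass scheme (count into int_reg_cnt/flt_reg_cnt/stk_reg_pairs, then lay out) with a single pass over the signature and one stack-slot counter. A toy model of that single pass, runnable and illustrative only (the register limit mirrors the constant above; everything else is simplified):

    #include <cstdio>

    // Toy model of the single-pass layout (illustrative, not VM code).
    static int round_to(int x, int align) { return (x + align - 1) & ~(align - 1); }

    int main() {
      const int int_reg_max = 6;   // SPARC_ARGS_IN_REGS_NUM
      int int_reg = 0, slot = 0;

      // Eight int args: the first six land in registers, the rest on the stack.
      for (int i = 0; i < 8; i++) {
        if (int_reg < int_reg_max) printf("arg%d -> int reg %d\n", i, int_reg++);
        else                       printf("arg%d -> stack slot %d\n", i, slot++);
      }

      // A trailing long gets an aligned two-slot stack pair (the T_LONG case).
      slot = round_to(slot, 2);
      printf("long -> stack slots %d..%d\n", slot, slot + 1);
      slot += 2;

      // The function returns the number of stack slots consumed.
      printf("return %d\n", slot);
      return 0;
    }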
@@ -601,8 +520,7 @@ void AdapterGenerator::patch_callers_callsite() {
   Label L;
   __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
   __ br_null(G3_scratch, false, Assembler::pt, L);
-  // Schedule the branch target address early.
-  __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
+  __ delayed()->nop();
 
   // Call into the VM to patch the caller, then jump to compiled callee
   __ save_frame(4);     // Args in compiled layout; do not blow them
...
...
@@ -645,7 +563,6 @@ void AdapterGenerator::patch_callers_callsite() {
   __ ldx(FP, -8 + STACK_BIAS, G1);
   __ ldx(FP, -16 + STACK_BIAS, G4);
   __ mov(L5, G5_method);
-  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 #endif /* _LP64 */
 
   __ restore();      // Restore args
...
...
@@ -726,7 +643,7 @@ void AdapterGenerator::gen_c2i_adapter(
                                 int comp_args_on_stack, // VMRegStackSlots
                                 const BasicType *sig_bt,
                                 const VMRegPair *regs,
-                                Label& skip_fixup) {
+                                Label& L_skip_fixup) {
 
   // Before we get into the guts of the C2I adapter, see if we should be here
   // at all.  We've come from compiled code and are attempting to jump to the
...
...
@@ -747,7 +664,7 @@ void AdapterGenerator::gen_c2i_adapter(
   patch_callers_callsite();
 
-  __ bind(skip_fixup);
+  __ bind(L_skip_fixup);
 
   // Since all args are passed on the stack, total_args_passed*wordSize is the
   // space we need.  Add in varargs area needed by the interpreter. Round up
...
...
@@ -757,46 +674,18 @@ void AdapterGenerator::gen_c2i_adapter(
       (frame::varargs_offset - frame::register_save_words) * wordSize;
   const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
 
-  int bias = STACK_BIAS;
+  const int bias = STACK_BIAS;
   const int interp_arg_offset = frame::varargs_offset*wordSize +
                                 (total_args_passed-1)*Interpreter::stackElementSize;
 
-  Register base = SP;
+  const Register base = SP;
 
-#ifdef _LP64
-  // In the 64bit build because of wider slots and STACKBIAS we can run
-  // out of bits in the displacement to do loads and stores.  Use g3 as
-  // temporary displacement.
-  if (!Assembler::is_simm13(extraspace)) {
-    __ set(extraspace, G3_scratch);
-    __ sub(SP, G3_scratch, SP);
-  } else {
-    __ sub(SP, extraspace, SP);
-  }
-  set_Rdisp(G3_scratch);
-#else
-  __ sub(SP, extraspace, SP);
-#endif // _LP64
+  // Make some extra space on the stack.
+  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
+  set_Rdisp(G3_scratch);
 
-  // First write G1 (if used) to where ever it must go
-  for (int i = 0; i < total_args_passed; i++) {
-    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
-    VMReg r_1 = regs[i].first();
-    VMReg r_2 = regs[i].second();
-    if (r_1 == G1_scratch->as_VMReg()) {
-      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
-        store_c2i_object(G1_scratch, base, st_off);
-      } else if (sig_bt[i] == T_LONG) {
-        assert(!TieredCompilation, "should not use register args for longs");
-        store_c2i_long(G1_scratch, base, st_off, false);
-      } else {
-        store_c2i_int(G1_scratch, base, st_off);
-      }
-    }
-  }
-
-  // Now write the args into the outgoing interpreter space
+  // Write the args into the outgoing interpreter space.
   for (int i = 0; i < total_args_passed; i++) {
     const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
...
...
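Both gen_c2i_adapter hunks lean on the new ensure_simm13_or_reg helper: SPARC load/store displacements are 13-bit signed immediates, so an offset either fits directly or must first be materialized into a scratch register. A sketch of just the range check, under the assumption that simm13 means the inclusive range [-4096, 4095]:

    #include <cstdio>

    // ensure_simm13_or_reg-style check: can this offset be encoded directly?
    static bool is_simm13(long x) { return -4096 <= x && x <= 4095; }

    int main() {
      printf("%d %d %d\n", is_simm13(4095), is_simm13(-4096), is_simm13(5000));  // 1 1 0
      return 0;
    }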
@@ -804,23 +693,9 @@ void AdapterGenerator::gen_c2i_adapter(
       assert(!r_2->is_valid(), "");
       continue;
     }
-    // Skip G1 if found as we did it first in order to free it up
-    if (r_1 == G1_scratch->as_VMReg()) {
-      continue;
-    }
-#ifdef ASSERT
-    bool G1_forced = false;
-#endif // ASSERT
     if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
-#ifdef _LP64
-      Register ld_off = Rdisp;
-      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
-#else
-      int ld_off = reg2offset(r_1) + extraspace + bias;
-#endif // _LP64
-#ifdef ASSERT
-      G1_forced = true;
-#endif // ASSERT
+      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
+      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
       r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
       if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
       else                  __ ldx(base, ld_off, G1_scratch);
...
...
@@ -831,11 +706,6 @@ void AdapterGenerator::gen_c2i_adapter(
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         store_c2i_object(r, base, st_off);
       } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
-#ifndef _LP64
-        if (TieredCompilation) {
-          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
-        }
-#endif // _LP64
         store_c2i_long(r, base, st_off, r_2->is_stack());
       } else {
         store_c2i_int(r, base, st_off);
...
...
@@ -851,19 +721,12 @@ void AdapterGenerator::gen_c2i_adapter(
     }
   }
 
-#ifdef _LP64
-  // Need to reload G3_scratch, used for temporary displacements.
+  // Load the interpreter entry point.
   __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 
   // Pass O5_savedSP as an argument to the interpreter.
   // The interpreter will restore SP to this value before returning.
-  __ set(extraspace, G1);
-  __ add(SP, G1, O5_savedSP);
-#else
-  // Pass O5_savedSP as an argument to the interpreter.
-  // The interpreter will restore SP to this value before returning.
-  __ add(SP, extraspace, O5_savedSP);
-#endif // _LP64
+  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);
 
   __ mov((frame::varargs_offset)*wordSize -
          1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
...
...
@@ -971,7 +834,6 @@ void AdapterGenerator::gen_i2c_adapter(
   // Outputs:
   //    G2_thread      - TLS
-  //    G1, G4         - Outgoing long args in 32-bit build
   //    O0-O5          - Outgoing args in compiled layout
   //    O6             - Adjusted or restored SP
   //    O7             - Valid return address
...
...
@@ -1016,10 +878,10 @@ void AdapterGenerator::gen_i2c_adapter(
// +--------------+ <--- start of outgoing args
// | pad, align | |
// +--------------+ |
// | ints,
floats | |---Outgoing stack args, packed low.
//
+--------------+ | First few args in register
s.
// :
doubles : |
// |
longs
| |
// | ints,
longs, | |
//
| floats, | |---Outgoing stack arg
s.
// :
doubles : | First few args in registers.
// |
| |
// +--------------+ <--- SP' + 16*wordsize
// | |
// : window :
...
...
@@ -1033,7 +895,6 @@ void AdapterGenerator::gen_i2c_adapter(
   // Cut-out for having no stack args.  Since up to 6 args are passed
   // in registers, we will commonly have no stack args.
   if (comp_args_on_stack > 0) {
-
     // Convert VMReg stack slots to words.
     int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to minimum stack alignment, in wordSize
...
...
@@ -1044,13 +905,9 @@ void AdapterGenerator::gen_i2c_adapter(
     __ sub(SP, (comp_words_on_stack)*wordSize, SP);
   }
 
-  // Will jump to the compiled code just as if compiled code was doing it.
-  // Pre-load the register-jump target early, to schedule it better.
-  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
-
   // Now generate the shuffle code.  Pick up all register args and move the
   // rest through G1_scratch.
   for (int i = 0; i < total_args_passed; i++) {
     if (sig_bt[i] == T_VOID) {
       // Longs and doubles are passed in native word order, but misaligned
       // in the 32-bit build.
...
...
@@ -1088,14 +945,13 @@ void AdapterGenerator::gen_i2c_adapter(
                    next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldx(Gargs, slot, r);
 #else
-        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
-        // stack shuffle.  Load the first 2 longs into G1/G4 later.
+        fatal("longs should be on stack");
 #endif
       }
     } else {
       assert(r_1->is_FloatRegister(), "");
       if (!r_2->is_valid()) {
         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
       } else {
 #ifdef _LP64
         // In V9, doubles are given 2 64-bit slots in the interpreter, but the
...
...
@@ -1104,11 +960,11 @@ void AdapterGenerator::gen_i2c_adapter(
         // spare float register.
         RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
                                   ? next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
 #else
         // Need to marshal 64-bit value from misaligned Lesp loads
         __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
 #endif
       }
     }
...
...
@@ -1124,76 +980,35 @@ void AdapterGenerator::gen_i2c_adapter(
       else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
     }
   }
-  bool made_space = false;
-#ifndef _LP64
-  // May need to pick up a few long args in G1/G4
-  bool g4_crushed = false;
-  bool g3_crushed = false;
-  for (int i = 0; i < total_args_passed; i++) {
-    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
-      // Load in argument order going down
-      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
-      // Need to marshal 64-bit value from misaligned Lesp loads
-      Register r = regs[i].first()->as_Register()->after_restore();
-      if (r == G1 || r == G4) {
-        assert(!g4_crushed, "ordering problem");
-        if (r == G4) {
-          g4_crushed = true;
-          __ lduw(Gargs, arg_slot(ld_off),      G3_scratch);  // Load lo bits
-          __ ld  (Gargs, next_arg_slot(ld_off), r);           // Load hi bits
-        } else {
-          // better schedule this way
-          __ ld  (Gargs, next_arg_slot(ld_off), r);           // Load hi bits
-          __ lduw(Gargs, arg_slot(ld_off),      G3_scratch);  // Load lo bits
-        }
-        g3_crushed = true;
-        __ sllx(r, 32, r);
-        __ or3(G3_scratch, r, r);
-      } else {
-        assert(r->is_out(), "longs passed in two O registers");
-        __ ld  (Gargs, arg_slot(ld_off),      r->successor());  // Load lo bits
-        __ ld  (Gargs, next_arg_slot(ld_off), r);               // Load hi bits
-      }
-    }
-  }
-#endif
-
   // Jump to the compiled code just as if compiled code was doing it.
-  //
-#ifndef _LP64
-  if (g3_crushed) {
-    // Rats load was wasted, at least it is in cache...
-    __ ld_ptr(G5_method, Method::from_compiled_offset(), G3);
-  }
-#endif /* _LP64 */
+  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
 
   // 6243940 We might end up in handle_wrong_method if
   // the callee is deoptimized as we race thru here. If that
   // happens we don't want to take a safepoint because the
   // caller frame will look interpreted and arguments are now
   // "compiled" so it is much better to make this transition
   // invisible to the stack walking code. Unfortunately if
   // we try and find the callee by normal means a safepoint
   // is possible. So we stash the desired callee in the thread
   // and the vm will find there should this case occur.
   Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
   __ st_ptr(G5_method, callee_target_addr);
 
   if (StressNonEntrant) {
     // Open a big window for deopt failure
     __ save_frame(0);
     __ mov(G0, L0);
     Label loop;
     __ bind(loop);
     __ sub(L0, 1, L0);
     __ br_null_short(L0, Assembler::pt, loop);
     __ restore();
   }
 
   __ jmpl(G3, 0, G0);
   __ delayed()->nop();
 }
// ---------------------------------------------------------------
...
...
@@ -1221,28 +1036,17 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   //                        compiled code, which relies solely on SP and not FP, get sick).
 
   address c2i_unverified_entry = __ pc();
-  Label skip_fixup;
+  Label L_skip_fixup;
   {
-#if !defined(_LP64) && defined(COMPILER2)
-    Register R_temp = L0;   // another scratch register
-#else
     Register R_temp = G1;   // another scratch register
-#endif
 
     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
 
     __ verify_oop(O0);
     __ load_klass(O0, G3_scratch);
 
-#if !defined(_LP64) && defined(COMPILER2)
-    __ save(SP, -frame::register_save_words*wordSize, SP);
-    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
-    __ cmp(G3_scratch, R_temp);
-    __ restore();
-#else
     __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
     __ cmp(G3_scratch, R_temp);
-#endif
 
     Label ok, ok2;
     __ brx(Assembler::equal, false, Assembler::pt, ok);
...
...
@@ -1256,8 +1060,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
     // the call site corrected.
     __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
     __ bind(ok2);
-    __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
-    __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
+    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
+    __ delayed()->nop();
     __ jump_to(ic_miss, G3_scratch);
     __ delayed()->nop();
...
...
@@ -1265,7 +1069,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   address c2i_entry = __ pc();
 
-  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
+  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);
 
   __ flush();
 
   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
...
...
@@ -1985,12 +1789,12 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
 }
 
 static void verify_oop_args(MacroAssembler* masm,
-                            int total_args_passed,
+                            methodHandle method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = G5_method;   // not part of any compiled calling seq
   if (VerifyOops) {
-    for (int i = 0; i < total_args_passed; i++) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT ||
           sig_bt[i] == T_ARRAY) {
         VMReg r = regs[i].first();
...
...
@@ -2009,35 +1813,32 @@ static void verify_oop_args(MacroAssembler* masm,
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 int total_args_passed,
-                                 int comp_args_on_stack,
-                                 vmIntrinsics::ID special_dispatch,
+                                 methodHandle method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
-  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
 
   // Now write the args into the outgoing interpreter space
   bool     has_receiver   = false;
   Register receiver_reg   = noreg;
   int      member_arg_pos = -1;
   Register member_reg     = noreg;
-  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
   if (ref_kind != 0) {
-    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
     member_reg = G5_method;  // known to be free at this point
     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+  } else if (iid == vmIntrinsics::_invokeBasic) {
     has_receiver = true;
   } else {
-    fatal(err_msg("special_dispatch=%d", special_dispatch));
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
   }
 
   if (member_reg != noreg) {
     // Load the member_arg into register, if necessary.
-    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
     VMReg r = regs[member_arg_pos].first();
     assert(r->is_valid(), "bad member arg");
     if (r->is_stack()) {
       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
...
...
@@ -2050,7 +1851,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   if (has_receiver) {
     // Make sure the receiver is loaded into a register.
-    assert(total_args_passed > 0, "oob");
+    assert(method->size_of_parameters() > 0, "oob");
     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
     VMReg r = regs[0].first();
     assert(r->is_valid(), "bad receiver arg");
...
...
@@ -2058,7 +1859,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
       // Porting note:  This assumes that compiled calling conventions always
       // pass the receiver oop in a register.  If this is not true on some
       // platform, pick a temp and load the receiver from stack.
-      assert(false, "receiver always in a register");
+      fatal("receiver always in a register");
       receiver_reg = G3_scratch;  // known to be free at this point
       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
...
...
@@ -2070,7 +1871,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   }
 
   // Figure out which address we are really jumping to:
-  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }
...
...
@@ -2103,11 +1904,9 @@ static void gen_special_dispatch(MacroAssembler* masm,
 // transition back to thread_in_Java
 // return to caller
 //
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_in_args,
-                                                int comp_args_on_stack, // in VMRegStackSlots
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
                                                 BasicType ret_type) {
...
...
@@ -2116,9 +1915,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     intptr_t start = (intptr_t)__ pc();
     int vep_offset = ((intptr_t)__ pc()) - start;
     gen_special_dispatch(masm,
-                         total_in_args,
-                         comp_args_on_stack,
-                         method->intrinsic_id(),
+                         method,
                          in_sig_bt,
                          in_regs);
     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
...
...
@@ -2220,6 +2017,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
+  const int total_in_args = method->size_of_parameters();
   int total_c_args = total_in_args;
   int total_save_slots = 6 * VMRegImpl::slots_per_word;
   if (!is_critical_native) {
...
...
src/cpu/x86/vm/methodHandles_x86.cpp
...
...
@@ -327,10 +327,11 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     assert_different_registers(temp3, rcx, rdx);
   }
 #endif
+  else {
+    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
+  }
   assert_different_registers(temp1, temp2, temp3, receiver_reg);
   assert_different_registers(temp1, temp2, temp3, member_reg);
-  if (!for_compiler_entry)
-    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
 
   if (iid == vmIntrinsics::_invokeBasic) {
     // indirect through MH.form.vmentry.vmtarget
...
...
@@ -392,14 +393,13 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
   //  rsi/r13 - interpreter linkage (if interpreted)
   //  rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled)
 
-    bool method_is_live = false;
+    Label L_incompatible_class_change_error;
     switch (iid) {
     case vmIntrinsics::_linkToSpecial:
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
       __ movptr(rbx_method, member_vmtarget);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToStatic:
...
...
@@ -407,7 +407,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
       __ movptr(rbx_method, member_vmtarget);
-      method_is_live = true;
       break;
 
     case vmIntrinsics::_linkToVirtual:
...
...
@@ -436,7 +435,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// get target Method* & entry point
__
lookup_virtual_method
(
temp1_recv_klass
,
temp2_index
,
rbx_method
);
method_is_live
=
true
;
break
;
}
...
...
@@ -464,35 +462,32 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       }
 
       // given intf, index, and recv klass, dispatch to the implementation method
-      Label L_no_such_interface;
       __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                  // note: next two args must be the same:
                                  rbx_index, rbx_method,
                                  temp2,
-                                 L_no_such_interface);
-
-      __ verify_method_ptr(rbx_method);
-      jump_from_method_handle(_masm, rbx_method, temp2, for_compiler_entry);
-      __ hlt();
-
-      __ bind(L_no_such_interface);
-      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+                                 L_incompatible_class_change_error);
       break;
     }
 
     default:
-      fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
       break;
     }
 
-    if (method_is_live) {
-      // live at this point:  rbx_method, rsi/r13 (if interpreted)
-
-      // After figuring out which concrete method to call, jump into it.
-      // Note that this works in the interpreter with no data motion.
-      // But the compiled version will require that rcx_recv be shifted out.
-      __ verify_method_ptr(rbx_method);
-      jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
-    }
+    // Live at this point:
+    //   rbx_method
+    //   rsi/r13 (if interpreted)
+
+    // After figuring out which concrete method to call, jump into it.
+    // Note that this works in the interpreter with no data motion.
+    // But the compiled version will require that rcx_recv be shifted out.
+    __ verify_method_ptr(rbx_method);
+    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
+
+    if (iid == vmIntrinsics::_linkToInterface) {
+      __ bind(L_incompatible_class_change_error);
+      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+    }
   }
 }
...
...
src/cpu/x86/vm/sharedRuntime_x86_32.cpp
...
...
@@ -1346,12 +1346,12 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
 }
 
 static void verify_oop_args(MacroAssembler* masm,
-                            int total_args_passed,
+                            methodHandle method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
   if (VerifyOops) {
-    for (int i = 0; i < total_args_passed; i++) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT ||
           sig_bt[i] == T_ARRAY) {
         VMReg r = regs[i].first();
...
...
@@ -1368,35 +1368,32 @@ static void verify_oop_args(MacroAssembler* masm,
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 int total_args_passed,
-                                 int comp_args_on_stack,
-                                 vmIntrinsics::ID special_dispatch,
+                                 methodHandle method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
-  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
 
   // Now write the args into the outgoing interpreter space
   bool     has_receiver   = false;
   Register receiver_reg   = noreg;
   int      member_arg_pos = -1;
   Register member_reg     = noreg;
-  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
   if (ref_kind != 0) {
-    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
     member_reg = rbx;  // known to be free at this point
     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+  } else if (iid == vmIntrinsics::_invokeBasic) {
     has_receiver = true;
   } else {
-    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
   }
 
   if (member_reg != noreg) {
     // Load the member_arg into register, if necessary.
-    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
     VMReg r = regs[member_arg_pos].first();
     assert(r->is_valid(), "bad member arg");
     if (r->is_stack()) {
       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {
...
...
@@ -1407,7 +1404,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   if (has_receiver) {
     // Make sure the receiver is loaded into a register.
-    assert(total_args_passed > 0, "oob");
+    assert(method->size_of_parameters() > 0, "oob");
     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
     VMReg r = regs[0].first();
     assert(r->is_valid(), "bad receiver arg");
...
...
@@ -1415,7 +1412,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
       // Porting note:  This assumes that compiled calling conventions always
       // pass the receiver oop in a register.  If this is not true on some
       // platform, pick a temp and load the receiver from stack.
-      assert(false, "receiver always in a register");
+      fatal("receiver always in a register");
      receiver_reg = rcx;  // known to be free at this point
       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {
...
...
@@ -1425,7 +1422,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   }
 
   // Figure out which address we are really jumping to:
-  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }
...
...
@@ -1461,8 +1458,6 @@ static void gen_special_dispatch(MacroAssembler* masm,
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_in_args,
-                                                int comp_args_on_stack,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
                                                 BasicType ret_type) {
...
...
@@ -1471,9 +1466,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     intptr_t start = (intptr_t)__ pc();
     int vep_offset = ((intptr_t)__ pc()) - start;
     gen_special_dispatch(masm,
-                         total_in_args,
-                         comp_args_on_stack,
-                         method->intrinsic_id(),
+                         method,
                          in_sig_bt,
                          in_regs);
     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
...
...
@@ -1506,6 +1499,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
+  const int total_in_args = method->size_of_parameters();
   int total_c_args = total_in_args;
   if (!is_critical_native) {
     total_c_args += 1;
...
...
src/cpu/x86/vm/sharedRuntime_x86_64.cpp
...
...
@@ -1593,12 +1593,12 @@ class ComputeMoveOrder: public StackObj {
 };
 
 static void verify_oop_args(MacroAssembler* masm,
-                            int total_args_passed,
+                            methodHandle method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
   if (VerifyOops) {
-    for (int i = 0; i < total_args_passed; i++) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT ||
           sig_bt[i] == T_ARRAY) {
         VMReg r = regs[i].first();
...
...
@@ -1615,35 +1615,32 @@ static void verify_oop_args(MacroAssembler* masm,
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 int total_args_passed,
-                                 int comp_args_on_stack,
-                                 vmIntrinsics::ID special_dispatch,
+                                 methodHandle method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
-  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
 
   // Now write the args into the outgoing interpreter space
   bool     has_receiver   = false;
   Register receiver_reg   = noreg;
   int      member_arg_pos = -1;
   Register member_reg     = noreg;
-  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  int      ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
   if (ref_kind != 0) {
-    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
     member_reg = rbx;  // known to be free at this point
     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
-  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+  } else if (iid == vmIntrinsics::_invokeBasic) {
     has_receiver = true;
   } else {
-    guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
   }
 
   if (member_reg != noreg) {
     // Load the member_arg into register, if necessary.
-    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
-    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
     VMReg r = regs[member_arg_pos].first();
     assert(r->is_valid(), "bad member arg");
     if (r->is_stack()) {
       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {
...
...
@@ -1654,7 +1651,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   if (has_receiver) {
     // Make sure the receiver is loaded into a register.
-    assert(total_args_passed > 0, "oob");
+    assert(method->size_of_parameters() > 0, "oob");
     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
     VMReg r = regs[0].first();
     assert(r->is_valid(), "bad receiver arg");
...
...
@@ -1662,7 +1659,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
       // Porting note:  This assumes that compiled calling conventions always
       // pass the receiver oop in a register.  If this is not true on some
       // platform, pick a temp and load the receiver from stack.
-      assert(false, "receiver always in a register");
+      fatal("receiver always in a register");
      receiver_reg = j_rarg0;  // known to be free at this point
       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
     } else {
...
...
@@ -1672,7 +1669,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   }
 
   // Figure out which address we are really jumping to:
-  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }
...
...
@@ -1708,8 +1705,6 @@ static void gen_special_dispatch(MacroAssembler* masm,
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
-                                                int total_in_args,
-                                                int comp_args_on_stack,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
                                                 BasicType ret_type) {
...
...
@@ -1718,9 +1713,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     intptr_t start = (intptr_t)__ pc();
     int vep_offset = ((intptr_t)__ pc()) - start;
     gen_special_dispatch(masm,
-                         total_in_args,
-                         comp_args_on_stack,
-                         method->intrinsic_id(),
+                         method,
                          in_sig_bt,
                          in_regs);
     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
...
...
@@ -1754,6 +1747,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
+  const int total_in_args = method->size_of_parameters();
   int total_c_args = total_in_args;
   if (!is_critical_native) {
     total_c_args += 1;
...
...
src/share/vm/asm/register.hpp
...
...
@@ -103,8 +103,8 @@ inline void assert_different_registers(
 ) {
   assert(
     a != b,
-    err_msg("registers must be different: a=%d, b=%d",
-            a, b)
+    err_msg_res("registers must be different: a=%d, b=%d",
+                a, b)
   );
 }
...
...
@@ -117,8 +117,8 @@ inline void assert_different_registers(
   assert(
     a != b && a != c && b != c,
-    err_msg("registers must be different: a=%d, b=%d, c=%d",
-            a, b, c)
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d",
+                a, b, c)
   );
 }
...
...
@@ -133,8 +133,8 @@ inline void assert_different_registers(
     a != b && a != c && a != d
     && b != c && b != d
     && c != d,
-    err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d",
-            a, b, c, d)
+    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d",
+                a, b, c, d)
   );
 }
...
...
@@ -151,8 +151,8 @@ inline void assert_different_registers(
&&
b
!=
c
&&
b
!=
d
&&
b
!=
e
&&
c
!=
d
&&
c
!=
e
&&
d
!=
e
,
err_msg
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d"
,
a
,
b
,
c
,
d
,
e
)
err_msg
_res
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d"
,
a
,
b
,
c
,
d
,
e
)
);
}
...
...
@@ -171,8 +171,8 @@ inline void assert_different_registers(
&&
c
!=
d
&&
c
!=
e
&&
c
!=
f
&&
d
!=
e
&&
d
!=
f
&&
e
!=
f
,
err_msg
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d"
,
a
,
b
,
c
,
d
,
e
,
f
)
err_msg
_res
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d"
,
a
,
b
,
c
,
d
,
e
,
f
)
);
}
...
...
@@ -193,8 +193,8 @@ inline void assert_different_registers(
&&
d
!=
e
&&
d
!=
f
&&
d
!=
g
&&
e
!=
f
&&
e
!=
g
&&
f
!=
g
,
err_msg
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
)
err_msg
_res
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
)
);
}
...
...
@@ -217,8 +217,8 @@ inline void assert_different_registers(
&&
e
!=
f
&&
e
!=
g
&&
e
!=
h
&&
f
!=
g
&&
f
!=
h
&&
g
!=
h
,
err_msg
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
,
h
)
err_msg
_res
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
,
h
)
);
}
...
...
@@ -243,8 +243,8 @@ inline void assert_different_registers(
&&
f
!=
g
&&
f
!=
h
&&
f
!=
i
&&
g
!=
h
&&
g
!=
i
&&
h
!=
i
,
err_msg
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
,
h
,
i
)
err_msg
_res
(
"registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d"
,
a
,
b
,
c
,
d
,
e
,
f
,
g
,
h
,
i
)
);
}
...
...
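All eight overloads spell out the same pairwise-distinct check by hand for 2 through 9 registers; only the message builder changes from err_msg to err_msg_res (which, as I read utilities/debug.hpp, formats into resource-area storage rather than a large stack buffer; treat that motivation as an assumption here, not something this page states). A generic re-implementation of the pairwise check, illustrative only:

    #include <cstdio>
    #include <cstddef>

    // Pairwise-distinct check, equivalent in effect to the hand-rolled
    // assert_different_registers overloads (not HotSpot code).
    template <typename T, std::size_t N>
    bool all_different(const T (&regs)[N]) {
      for (std::size_t i = 0; i < N; i++)
        for (std::size_t j = i + 1; j < N; j++)
          if (regs[i] == regs[j]) return false;
      return true;
    }

    int main() {
      int ok[]  = {1, 2, 3, 4};
      int bad[] = {1, 2, 2, 4};
      printf("%d %d\n", all_different(ok), all_different(bad));  // prints: 1 0
      return 0;
    }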
src/share/vm/code/nmethod.cpp
...
...
@@ -700,7 +700,9 @@ nmethod::nmethod(
   // then print the requested information
   if (PrintNativeNMethods) {
     print_code();
-    oop_maps->print();
+    if (oop_maps != NULL) {
+      oop_maps->print();
+    }
   }
   if (PrintRelocations) {
     print_relocations();
...
...
src/share/vm/runtime/sharedRuntime.cpp
...
...
@@ -1618,6 +1618,31 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
   return callee_method;
 }
 
+#ifdef ASSERT
+void SharedRuntime::check_member_name_argument_is_last_argument(methodHandle method,
+                                                                const BasicType* sig_bt,
+                                                                const VMRegPair* regs) {
+  ResourceMark rm;
+  const int total_args_passed = method->size_of_parameters();
+  const VMRegPair*    regs_with_member_name = regs;
+        VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
+
+  const int member_arg_pos = total_args_passed - 1;
+  assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
+  assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+
+  const bool is_outgoing = method->is_method_handle_intrinsic();
+  int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1, is_outgoing);
+
+  for (int i = 0; i < member_arg_pos; i++) {
+    VMReg a =    regs_with_member_name[i].first();
+    VMReg b = regs_without_member_name[i].first();
+    assert(a->value() == b->value(), err_msg_res("register allocation mismatch: a=%d, b=%d", a->value(), b->value()));
+  }
+  assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
+}
+#endif
+
 // ---------------------------------------------------------------------------
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race
...
...
@@ -2546,10 +2571,10 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
       MacroAssembler _masm(&buffer);
 
       // Fill in the signature array, for the calling-convention call.
-      int total_args_passed = method->size_of_parameters();
+      const int total_args_passed = method->size_of_parameters();
 
       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
 
       int i = 0;
       if (!method->is_static())  // Pass in receiver first
         sig_bt[i++] = T_OBJECT;
...
...
@@ -2559,7 +2584,7 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
         if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
       }
       assert(i == total_args_passed, "");
       BasicType ret_type = ss.type();
 
       // Now get the compiled-Java layout as input (or output) arguments.
...
...
@@ -2572,9 +2597,8 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int c
       nm = SharedRuntime::generate_native_wrapper(&_masm,
                                                   method,
                                                   compile_id,
-                                                  total_args_passed,
-                                                  comp_args_on_stack,
-                                                  sig_bt, regs,
+                                                  sig_bt, regs,
                                                   ret_type);
     }
   }
...
...
src/share/vm/runtime/sharedRuntime.hpp
...
...
@@ -345,7 +345,11 @@ class SharedRuntime: AllStatic {
   // the bottom of the frame the first 16 words will be skipped and SharedInfo::stack0
   // will be just above it. (
   // return value is the maximum number of VMReg stack slots the convention will use.
   static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed, int is_outgoing);
 
+  static void check_member_name_argument_is_last_argument(methodHandle method,
+                                                          const BasicType* sig_bt,
+                                                          const VMRegPair* regs) NOT_DEBUG_RETURN;
+
   // Ditto except for calling C
   static int c_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed);
...
...
@@ -425,13 +429,11 @@ class SharedRuntime: AllStatic {
   // The wrapper may contain special-case code if the given method
   // is a JNI critical method, or a compiled method handle adapter,
   // such as _invokeBasic, _linkToVirtual, etc.
   static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                           methodHandle method,
                                           int compile_id,
-                                          int total_args_passed,
-                                          int max_arg,
-                                          BasicType* sig_bt, VMRegPair* regs,
+                                          BasicType* sig_bt, VMRegPair* regs,
                                           BasicType ret_type);
 
   // Block before entering a JNI critical method
...
...
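The NOT_DEBUG_RETURN suffix on the new check_member_name_argument_is_last_argument declaration is what lets its definition in sharedRuntime.cpp sit under #ifdef ASSERT: in product builds the declaration itself supplies an empty inline body. Paraphrasing the idiom from utilities/macros.hpp (from memory; consult the header for the exact definition):

    #ifdef ASSERT
    #define NOT_DEBUG_RETURN  /* next token must be ; */
    #else
    #define NOT_DEBUG_RETURN  {}   // product builds: declaration becomes an empty inline body
    #endif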