openanolis / dragonwell8_hotspot
Commit 49a6ff0d
Authored December 22, 2009 by iveresov
Merge
Parents: ad1d0faf, c8a1c782
Showing 14 changed files with 210 additions and 122 deletions (+210 -122)
src/cpu/x86/vm/assembler_x86.cpp               +1   -1
src/cpu/x86/vm/interp_masm_x86_32.cpp          +3   -0
src/cpu/x86/vm/interp_masm_x86_64.cpp          +33  -7
src/cpu/x86/vm/interp_masm_x86_64.hpp          +5   -3
src/cpu/x86/vm/interpreter_x86_64.cpp          +8   -6
src/cpu/x86/vm/methodHandles_x86.cpp           +1   -1
src/cpu/x86/vm/stubGenerator_x86_64.cpp        +10  -0
src/cpu/x86/vm/templateInterpreter_x86_64.cpp  +26  -7
src/cpu/x86/vm/templateTable_x86_32.cpp        +0   -1
src/cpu/x86/vm/templateTable_x86_64.cpp        +63  -37
src/cpu/x86/vm/templateTable_x86_64.hpp        +1   -2
src/share/vm/classfile/classFileParser.cpp     +6   -17
src/share/vm/code/nmethod.cpp                  +43  -34
src/share/vm/code/nmethod.hpp                  +10  -6
src/cpu/x86/vm/assembler_x86.cpp

@@ -7666,7 +7666,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
 #ifdef ASSERT
   Label L;
-  testl(tmp, tmp);
+  testptr(tmp, tmp);
   jccb(Assembler::notZero, L);
   hlt();
   bind(L);
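The one-line change above swaps a 32-bit register test (`testl`) for a pointer-width test (`testptr`), which matters on x86_64 where a non-NULL pointer can have all-zero low 32 bits. A minimal standalone sketch of the failure mode (the `uintptr_t` value is an illustrative stand-in, not HotSpot code):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 64-bit pointer whose low 32 bits are all zero.
  uintptr_t p = uintptr_t(1) << 40;

  // What a 32-bit test (testl) effectively observes: only the low half.
  bool looks_null_32 = (uint32_t)p == 0;

  // What a pointer-width test (testptr -> testq on x86_64) observes.
  bool looks_null_64 = p == 0;

  printf("32-bit test says NULL: %d, full-width test says NULL: %d\n",
         looks_null_32, looks_null_64);   // prints: 1, 0
  return 0;
}
```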
src/cpu/x86/vm/interp_masm_x86_32.cpp

@@ -196,6 +196,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
   } else {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(reg, Address(rsi, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(reg);  // convert to plain index
   }
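The assembler relies on the VM-side convention that a "giant" (secondary) constant-pool-cache index is stored as the bitwise complement of the plain index, so a single `notl` decodes it. A small standalone model of that encoding (the function names mirror `constantPoolCacheOopDesc`, but the definitions here are illustrative, not the HotSpot ones):

```cpp
#include <cassert>

// Model of the secondary-index convention the diff's assert checks:
// a secondary index is stored as ~index, so encoding and decoding are
// both a single bitwise NOT (notl in the interpreter's assembly).
static int encode_secondary_index(int index) { return ~index; }
static int decode_secondary_index(int index) { return ~index; }

int main() {
  assert(decode_secondary_index(~123) == 123);  // the diff's sanity assert
  assert(decode_secondary_index(encode_secondary_index(42)) == 42);
  // Encoded values are negative, so they cannot collide with plain
  // (non-negative) indices read from the bytecode stream.
  assert(encode_secondary_index(0) < 0);
  return 0;
}
```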
src/cpu/x86/vm/interp_masm_x86_64.cpp

@@ -185,12 +185,30 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
 }

+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+                                                       int bcp_offset,
+                                                       bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    load_unsigned_short(index, Address(r13, bcp_offset));
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    movl(index, Address(r13, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    notl(index);  // convert to plain index
+  }
+}
+
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
-                                                           int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                           int bcp_offset,
+                                                           bool giant_index) {
   assert(cache != index, "must use different registers");
-  load_unsigned_short(index, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
@@ -200,10 +218,10 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
-                                                               int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                               int bcp_offset,
+                                                               bool giant_index) {
   assert(cache != tmp, "must use different register");
-  load_unsigned_short(tmp, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
@@ -1236,7 +1254,8 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                      Register mdp,
-                                                     Register reg2) {
+                                                     Register reg2,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;

@@ -1246,8 +1265,15 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
     // We are making a call.  Increment the count.
     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      testptr(receiver, receiver);
+      jcc(Assembler::zero, skip_receiver_profile);
+    }
+
     // Record the receiver type.
     record_klass_in_profile(receiver, mdp, reg2);
+    bind(skip_receiver_profile);

     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(mdp,
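The new `receiver_can_be_null` flag lets call sites that may legitimately see a NULL receiver skip receiver-type profiling instead of faulting on the klass load. A plain-C++ sketch of the control flow the emitted assembly implements (the profile structures here are hypothetical stand-ins, not HotSpot's method-data layout):

```cpp
#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-in for a per-call-site profile record.
struct CallProfile {
  long count = 0;
  std::map<std::string, long> receiver_klasses;  // klass name -> hits
};

struct Obj { std::string klass_name; };

// Mirrors the emitted logic: always bump the count, but only record a
// receiver klass when the receiver is actually non-NULL.
void profile_virtual_call(CallProfile& p, const Obj* receiver,
                          bool receiver_can_be_null) {
  p.count++;                                   // increment_mdp_data_at
  if (receiver_can_be_null && receiver == nullptr)
    return;                                    // jcc to skip_receiver_profile
  p.receiver_klasses[receiver->klass_name]++;  // record_klass_in_profile
}

int main() {
  CallProfile p;
  Obj o{"java/lang/String"};
  profile_virtual_call(p, &o, /*receiver_can_be_null=*/true);
  profile_virtual_call(p, nullptr, /*receiver_can_be_null=*/true);
  printf("calls=%ld, String hits=%ld\n",
         p.count, p.receiver_klasses["java/lang/String"]);  // calls=2, hits=1
  return 0;
}
```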
src/cpu/x86/vm/interp_masm_x86_64.hpp

@@ -95,9 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset);
+                                  int bcp_offset, bool giant_index = false);
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset);
+                                      int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);

   void pop_ptr(Register r = rax);

@@ -236,7 +237,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
-                            Register scratch2);
+                            Register scratch2,
+                            bool receiver_can_be_null = false);
   void profile_ret(Register return_bci, Register mdp);
   void profile_null_seen(Register mdp);
   void profile_typecheck(Register mdp, Register klass, Register scratch);
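Both new parameters default to `false`, so every pre-existing call site keeps compiling unchanged and only the invokedynamic paths opt in. A trivial standalone illustration of that API-evolution pattern (names are invented for the example, not the HotSpot declarations):

```cpp
#include <cstdio>

// Before: void get_cache_index(int bcp_offset);
// After: the same call sites still work because the new flag defaults off.
void get_cache_index(int bcp_offset, bool giant_index = false) {
  printf("offset=%d giant=%d\n", bcp_offset, giant_index);
}

int main() {
  get_cache_index(1);        // legacy call site, unchanged: giant=0
  get_cache_index(1, true);  // new invokedynamic path opts in: giant=1
  return 0;
}
```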
src/cpu/x86/vm/interpreter_x86_64.cpp

@@ -277,12 +277,11 @@ address InterpreterGenerator::generate_abstract_entry(void) {
   address entry_point = __ pc();

   // abstract method entry

-  // remove return address. Not really needed, since exception
-  // handling throws away expression stack
-  __ pop(rbx);
-
-  // adjust stack to what a normal return would do
-  __ mov(rsp, r13);
+  //  pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // rsi must be correct for exception handler (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

   // throw exception
   __ call_VM(noreg, CAST_FROM_FN_PTR(address,

@@ -300,7 +299,10 @@ address InterpreterGenerator::generate_method_handle_entry(void) {
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
+
+  return entry_point;
 }
src/cpu/x86/vm/methodHandles_x86.cpp

@@ -448,7 +448,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
                               rbx_index, Address::times_ptr,
                               base + vtableEntry::method_offset_in_bytes());
     Register rbx_method = rbx_temp;
-    __ movl(rbx_method, vtable_entry_addr);
+    __ movptr(rbx_method, vtable_entry_addr);

     __ verify_oop(rbx_method);
     __ jmp(rbx_method_fie);
src/cpu/x86/vm/stubGenerator_x86_64.cpp

@@ -2935,6 +2935,16 @@ class StubGenerator: public StubCodeGenerator {
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();

+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
   }

  public:
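C++ does not let a `for` loop increment an enum directly, hence the `ek = MethodHandles::EntryKind(1 + (int)ek)` idiom in the hunk above: cast to `int`, add one, cast back. A self-contained sketch of the same pattern (the enumerators below are invented for illustration):

```cpp
#include <cstdio>

// Illustrative enum using the same _EK_FIRST/_EK_LIMIT bracketing style.
enum EntryKind {
  _EK_FIRST = 0,
  ek_invoke_exact = _EK_FIRST,
  ek_invoke_generic,
  ek_bound_ref,
  _EK_LIMIT  // one past the last real kind
};

int main() {
  // Enums have no operator++, so step by round-tripping through int.
  for (EntryKind ek = _EK_FIRST; ek < _EK_LIMIT; ek = EntryKind(1 + (int)ek)) {
    printf("generating stub for entry kind %d\n", (int)ek);
  }
  return 0;
}
```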
src/cpu/x86/vm/templateInterpreter_x86_64.cpp

@@ -100,21 +100,26 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
   return entry;
 }

-// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
+// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
   address entry = __ pc();

-  // expression stack must be empty before entering the VM if an
-  // exception happened
+  __ pop(c_rarg2);              // failing object is at TOS
+  __ pop(c_rarg1);              // required type is at TOS+8
+
+  __ verify_oop(c_rarg1);
+  __ verify_oop(c_rarg2);
+
+  // Various method handle types use interpreter registers as temps.
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Expression stack must be empty before entering the VM for an exception.
   __ empty_expression_stack();

   __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_WrongMethodTypeException),
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_WrongMethodTypeException),
              // pass required type, failing object (or NULL)
              c_rarg1, c_rarg2);
   return entry;
@@ -182,15 +187,29 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   __ restore_bcp();
   __ restore_locals();
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
+
+  Label L_got_cache, L_giant_index;
+  if (EnableInvokeDynamic) {
+    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+    __ jcc(Assembler::equal, L_giant_index);
+  }
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
-                       Address::times_8,
+                       Address::times_ptr,
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
   if (TaggedStackInterpreter) __ shll(rbx, 1);  // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);

+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ jmp(L_got_cache);
+  }
+
   return entry;
 }
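Why the return entry needs the bytecode check: a normal invoke stores a 2-byte constant-pool-cache index after the opcode, while `invokedynamic` stores a 4-byte "giant" secondary index encoded as `~index` (see `get_cache_index_at_bcp` above). A standalone model of reading both operand forms from a bytecode stream (the layout and byte-order details are simplified for illustration):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Simplified stand-ins for the two invoke encodings discussed above.
enum { BC_INVOKEVIRTUAL = 0xb6, BC_INVOKEDYNAMIC = 0xba };

// Normal invokes: unsigned 2-byte index right after the opcode.
static uint32_t read_u2_index(const uint8_t* bcp) {
  return (uint32_t)bcp[1] << 8 | bcp[2];
}

// invokedynamic: native-order 4-byte value stored as ~index ("giant" index).
static uint32_t read_giant_index(const uint8_t* bcp) {
  uint32_t raw;
  memcpy(&raw, bcp + 1, sizeof(raw));  // movl(index, Address(r13, bcp_offset))
  return ~raw;                         // notl(index): convert to plain index
}

int main() {
  uint8_t virt[4] = {BC_INVOKEVIRTUAL, 0x00, 0x2a, 0};  // index 42
  uint8_t dyn[5]  = {BC_INVOKEDYNAMIC};
  uint32_t enc = ~uint32_t(12345);                      // stored as ~index
  memcpy(dyn + 1, &enc, sizeof(enc));

  assert(read_u2_index(virt) == 42);
  assert(read_giant_index(dyn) == 12345);
  return 0;
}
```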
src/cpu/x86/vm/templateTable_x86_32.cpp

@@ -3146,7 +3146,6 @@ void TemplateTable::invokedynamic(int byte_no) {
     __ profile_call(rsi);
   }

-  Label handle_unlinked_site;
   __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
   __ null_check(rcx);
   __ prepare_to_jump_from_interpreted();
src/cpu/x86/vm/templateTable_x86_64.cpp

@@ -203,18 +203,15 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
     __ jcc(Assembler::notEqual, fast_patch);
     __ get_method(scratch);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::set_original_bytecode_at),
-               scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
 #ifndef ASSERT
     __ jmpb(patch_done);
-    __ bind(fast_patch);
-  }
 #else
     __ jmp(patch_done);
+#endif
     __ bind(fast_patch);
   }
 #ifdef ASSERT
   Label okay;
   __ load_unsigned_byte(scratch, at_bcp(0));
   __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@@ -2054,26 +2051,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
   }
 }

 void TemplateTable::resolve_cache_and_index(int byte_no,
                                             Register Rcache,
                                             Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);

   const Register temp = rbx;
   assert_different_registers(Rcache, index, temp);

   const int shift_count = (1 + byte_no) * BitsPerByte;
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ movl(temp, Address(Rcache, index, Address::times_8,
-                        constantPoolCacheOopDesc::base_offset() +
-                        ConstantPoolCacheEntry::indices_offset()));
-  __ shrl(temp, shift_count);
-  // have we resolved this bytecode?
-  __ andl(temp, 0xFF);
-  __ cmpl(temp, (int) bytecode());
-  __ jcc(Assembler::equal, resolved);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // we are resolved if the f1 field contains a non-null CallSite object
+    __ cmpptr(Address(Rcache, index, Address::times_ptr,
+                      constantPoolCacheOopDesc::base_offset() +
+                      ConstantPoolCacheEntry::f1_offset()),
+              (int32_t) NULL_WORD);
+    __ jcc(Assembler::notEqual, resolved);
+  } else {
+    __ movl(temp, Address(Rcache, index, Address::times_ptr,
+                          constantPoolCacheOopDesc::base_offset() +
+                          ConstantPoolCacheEntry::indices_offset()));
+    __ shrl(temp, shift_count);
+    // have we resolved this bytecode?
+    __ andl(temp, 0xFF);
+    __ cmpl(temp, (int) bytecode());
+    __ jcc(Assembler::equal, resolved);
+  }

   // resolve first time through
   address entry;
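The hunk above gives `invokedynamic` its own notion of "already resolved": a regular bytecode is resolved when its opcode appears in the cache entry's `indices` field at a per-`byte_no` shift, while an `invokedynamic` entry is resolved exactly when `f1` holds a non-null `CallSite`. A compact C++ model of that dual test (the field layout is invented for illustration):

```cpp
#include <cassert>
#include <cstdint>

const int BitsPerByte = 8;

// Illustrative model of a constant pool cache entry's resolution state.
struct CacheEntry {
  uint64_t indices = 0;        // records resolved bytecodes at byte positions
  void*    f1      = nullptr;  // CallSite oop for invokedynamic sites
};

bool is_resolved(const CacheEntry& e, int bytecode, int byte_no,
                 bool is_invokedynamic) {
  if (is_invokedynamic)
    return e.f1 != nullptr;  // cmpptr(f1, NULL_WORD); jcc(notEqual, resolved)
  int shift_count = (1 + byte_no) * BitsPerByte;
  return ((e.indices >> shift_count) & 0xFF) == (uint64_t)bytecode;
}

int main() {
  CacheEntry e;
  assert(!is_resolved(e, 0xb6, 1, false));   // nothing recorded yet
  e.indices |= uint64_t(0xb6) << 16;         // the resolver records the opcode
  assert(is_resolved(e, 0xb6, 1, false));

  assert(!is_resolved(e, 0xba, 1, true));    // indy: f1 still NULL
  int call_site;                             // stand-in for a CallSite oop
  e.f1 = &call_site;
  assert(is_resolved(e, 0xba, 1, true));
  return 0;
}
```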
@@ -2090,6 +2089,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   case Bytecodes::_invokeinterface:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
     break;
+  case Bytecodes::_invokedynamic:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
+    break;
   default:
     ShouldNotReachHere();
     break;

@@ -2098,7 +2100,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   __ call_VM(noreg, entry, temp);

   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
@@ -2832,15 +2834,14 @@ void TemplateTable::count_calls(Register method, Register temp) {
   ShouldNotReachHere();
 }

-void TemplateTable::prepare_invoke(Register method,
-                                   Register index,
-                                   int byte_no,
-                                   Bytecodes::Code code) {
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
   // determine flags
+  Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
+  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
-  const bool load_receiver       = code != Bytecodes::_invokestatic;
+  const bool load_receiver       = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
   const bool receiver_null_check = is_invokespecial;
   const bool save_flags          = is_invokeinterface || is_invokevirtual;

   // setup registers & access constant pool cache
@@ -2858,9 +2859,13 @@ void TemplateTable::prepare_invoke(Register method,
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
-    __ movptr(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)));
-    __ verify_oop(recv);
+    Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+    if (is_invokedynamic) {
+      __ lea(recv, recv_addr);
+    } else {
+      __ movptr(recv, recv_addr);
+      __ verify_oop(recv);
+    }
   }

   // do null check if needed
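For `invokedynamic` there is no receiver object; the "receiver" register instead gets the address of the stack slot (via `lea`) so the method handle machinery can later locate the arguments. A small model of the load-value vs. load-address distinction (the stack layout is illustrative):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Illustrative expression stack; slot 0 holds a (fake) receiver oop.
  uintptr_t fake_oop = 0xdeadbeef;
  uintptr_t expr_stack[4] = { fake_oop, 0, 0, 0 };

  // Regular invoke: movptr(recv, recv_addr) loads the receiver value.
  uintptr_t recv_value = expr_stack[0];

  // invokedynamic: lea(recv, recv_addr) takes the slot's address instead,
  // since there is no receiver oop to load (or verify).
  uintptr_t* recv_addr = &expr_stack[0];

  assert(recv_value == fake_oop);
  assert(*recv_addr == fake_oop);   // same slot, viewed through its address
  return 0;
}
```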
@@ -2878,10 +2883,14 @@ void TemplateTable::prepare_invoke(Register method,
   ConstantPoolCacheEntry::verify_tosBits();
   // load return address
   {
-    ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
-    ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
-    __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
-    __ movptr(flags, Address(rscratch1, flags, Address::times_8));
+    address table_addr;
+    if (is_invokeinterface || is_invokedynamic)
+      table_addr = (address)Interpreter::return_5_addrs_by_index_table();
+    else
+      table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+    ExternalAddress table(table_addr);
+    __ lea(rscratch1, table);
+    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
   }

   // push return address
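`invokeinterface` and `invokedynamic` both occupy 5 bytes in the bytecode stream, while the other invokes take 3, so the return-entry table is chosen by instruction length: the pushed return address must skip the whole instruction. A sketch of that selection logic (the arithmetic is illustrative; the real tables also index by result type):

```cpp
#include <cassert>
#include <cstdio>

enum Code { invokevirtual, invokespecial, invokestatic,
            invokeinterface, invokedynamic };

// Bytecode lengths: opcode + 2-byte index, or opcode + 4 more bytes for
// invokeinterface (index, count, zero) and invokedynamic (giant index).
int return_entry_skip(Code code) {
  bool is_5_byte = (code == invokeinterface || code == invokedynamic);
  return is_5_byte ? 5 : 3;  // return_5_... vs return_3_addrs_by_index_table
}

int main() {
  int bci = 100;  // bytecode index of the invoke instruction
  assert(bci + return_entry_skip(invokevirtual) == 103);
  assert(bci + return_entry_skip(invokedynamic) == 105);
  printf("resume at bci %d after invokedynamic\n",
         bci + return_entry_skip(invokedynamic));
  return 0;
}
```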
@@ -2947,7 +2956,7 @@ void TemplateTable::invokevirtual_helper(Register index,
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);

   // rbx: index
   // rcx: receiver

@@ -2959,7 +2968,7 @@ void TemplateTable::invokevirtual(int byte_no) {
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);

   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);

@@ -2969,7 +2978,7 @@ void TemplateTable::invokespecial(int byte_no) {
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);

   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);

@@ -2983,7 +2992,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rax, rbx, byte_no, bytecode());
+  prepare_invoke(rax, rbx, byte_no);

   // rax: Interface
   // rbx: index
@@ -3072,7 +3081,24 @@ void TemplateTable::invokedynamic(int byte_no) {
     return;
   }

-  __ stop("invokedynamic NYI"); //6815692//
+  prepare_invoke(rax, rbx, byte_no);
+
+  // rax: CallSite object (f1)
+  // rbx: unused (f2)
+  // rcx: receiver address
+  // rdx: flags (unused)
+
+  if (ProfileInterpreter) {
+    Label L;
+    // %%% should make a type profile for any invokedynamic that takes a ref argument
+    // profile this call
+    __ profile_call(r13);
+  }
+
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
+  __ prepare_to_jump_from_interpreted();
+  __ jump_to_method_handle_entry(rcx, rdx);
 }
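The new `invokedynamic` template is the heart of this merge: after resolution, the cache entry's `f1` is a `java.dyn.CallSite`; the interpreter loads its `target` method handle, null-checks it, and jumps to the method handle entry. A miniature model of that dispatch chain (the types and fields are stand-ins for the `java_dyn_CallSite` layout, not the real object model):

```cpp
#include <cassert>
#include <cstdio>

// Stand-ins for the objects the template walks through.
struct MethodHandle { void (*entry)(); };       // jump target
struct CallSite     { MethodHandle* target; };  // java_dyn_CallSite.target

static void linked_impl() { puts("invoked through CallSite target"); }

// Mirrors the emitted sequence: take f1 (the CallSite), load its target,
// null-check it, then jump to the method handle's interpreter entry.
void interpret_invokedynamic(CallSite* f1) {
  MethodHandle* mh = f1->target;  // movptr(rcx, Address(rax, target_offset))
  assert(mh != nullptr);          // null_check(rcx)
  mh->entry();                    // jump_to_method_handle_entry(rcx, rdx)
}

int main() {
  MethodHandle mh{linked_impl};
  CallSite site{&mh};
  interpret_invokedynamic(&site);
  return 0;
}
```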
src/cpu/x86/vm/templateTable_x86_64.hpp

@@ -22,8 +22,7 @@
  *
  */

-  static void prepare_invoke(Register method, Register index, int byte_no,
-                             Bytecodes::Code code);
+  static void prepare_invoke(Register method, Register index, int byte_no);
   static void invokevirtual_helper(Register index, Register recv,
                                    Register flags);
   static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
src/share/vm/classfile/classFileParser.cpp

@@ -2511,23 +2511,12 @@ void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
       fac_ptr->nonstatic_byte_count -= 1;
       (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset,
                                    word_sig_index);
-      if (wordSize == jintSize) {
-        fac_ptr->nonstatic_word_count += 1;
-      } else {
-        fac_ptr->nonstatic_double_count += 1;
-      }
+      fac_ptr->nonstatic_word_count += 1;

-      FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + 4);
+      FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + instanceKlass::low_offset);
       assert(atype == NONSTATIC_BYTE, "");
       FieldAllocationType new_atype = NONSTATIC_WORD;
-      if (wordSize > jintSize) {
-        if (Universe::field_type_should_be_aligned(T_LONG)) {
-          atype = NONSTATIC_ALIGNED_DOUBLE;
-        } else {
-          atype = NONSTATIC_DOUBLE;
-        }
-      }
-      (*fields_ptr)->ushort_at_put(i + 4, new_atype);
+      (*fields_ptr)->ushort_at_put(i + instanceKlass::low_offset, new_atype);

       found_vmentry = true;
       break;
@@ -3085,7 +3074,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     int len = fields->length();
     for (int i = 0; i < len; i += instanceKlass::next_offset) {
       int real_offset;
-      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + 4);
+      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
       switch (atype) {
         case STATIC_OOP:
           real_offset = next_static_oop_offset;
@@ -3173,8 +3162,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
         default:
           ShouldNotReachHere();
       }
-      fields->short_at_put(i + 4, extract_low_short_from_int(real_offset));
-      fields->short_at_put(i + 5, extract_high_short_from_int(real_offset));
+      fields->short_at_put(i + instanceKlass::low_offset,  extract_low_short_from_int(real_offset));
+      fields->short_at_put(i + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
     }

     // Size of instances
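These hunks replace the magic indices 4 and 5 with named `instanceKlass::low_offset`/`high_offset` constants; the underlying scheme stores a 32-bit field offset as two 16-bit halves in the fields array. A standalone sketch of the split-and-reassemble arithmetic (the helper definitions below are written for illustration, mirroring the names used in the diff):

```cpp
#include <cassert>
#include <cstdint>

// Illustrative versions of the helpers used in the diff: a 32-bit field
// offset is stored as two u2 halves in the fields array.
static uint16_t extract_low_short_from_int(int32_t x)  { return (uint16_t)(x & 0xFFFF); }
static uint16_t extract_high_short_from_int(int32_t x) { return (uint16_t)((x >> 16) & 0xFFFF); }
static int32_t  build_int_from_shorts(uint16_t lo, uint16_t hi) {
  return ((int32_t)hi << 16) | lo;
}

int main() {
  // Named slots instead of the old magic numbers 4 and 5.
  const int low_offset = 4, high_offset = 5;
  uint16_t fields[6] = {0};

  int32_t real_offset = 0x12345;  // some computed field offset
  fields[low_offset]  = extract_low_short_from_int(real_offset);
  fields[high_offset] = extract_high_short_from_int(real_offset);

  assert(build_int_from_shorts(fields[low_offset], fields[high_offset]) == 0x12345);
  return 0;
}
```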
src/share/vm/code/nmethod.cpp

@@ -414,9 +414,8 @@ int nmethod::total_size() const {
 }

 const char* nmethod::compile_kind() const {
-  if (method() == NULL)    return "unloaded";
-  if (is_native_method())  return "c2n";
   if (is_osr_method())     return "osr";
+  if (method() != NULL && is_native_method())  return "c2n";
   return NULL;
 }

@@ -1127,6 +1126,9 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
   }
   flags.state = unloaded;

+  // Log the unloading.
+  log_state_change();
+
   // The methodOop is gone at this point
   assert(_method == NULL, "Tautology");
@@ -1137,8 +1139,6 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
 void nmethod::invalidate_osr_method() {
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  if (_entry_bci != InvalidOSREntryBci)
-    inc_decompile_count();
   // Remove from list of active nmethods
   if (method() != NULL)
     instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
@@ -1146,59 +1146,63 @@ void nmethod::invalidate_osr_method() {
   _entry_bci = InvalidOSREntryBci;
 }

-void nmethod::log_state_change(int state) const {
+void nmethod::log_state_change() const {
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
-      xtty->begin_elem("make_not_entrant %sthread='" UINTX_FORMAT "'",
-                       (state == zombie ? "zombie='1' " : ""),
-                       os::current_thread_id());
+      if (flags.state == unloaded) {
+        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+                         os::current_thread_id());
+      } else {
+        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+                         os::current_thread_id(),
+                         (flags.state == zombie ? " zombie='1'" : ""));
+      }
       log_identity(xtty);
       xtty->stamp();
       xtty->end_elem();
     }
   }
-  if (PrintCompilation) {
-    print_on(tty, state == zombie ? "made zombie " : "made not entrant ");
+  if (PrintCompilation && flags.state != unloaded) {
+    print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
     tty->cr();
   }
 }

 // Common functionality for both make_not_entrant and make_zombie
-void nmethod::make_not_entrant_or_zombie(int state) {
+bool nmethod::make_not_entrant_or_zombie(int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");

-  // Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
-  // They never become zombie/non-entrant, so the nmethod sweeper will never remove
-  // them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
-  // will never be used anymore. That the nmethods only gets removed when class unloading
-  // happens, make life much simpler, since the nmethods are not just going to disappear
-  // out of the blue.
-  if (is_osr_method()) {
-    if (osr_entry_bci() != InvalidOSREntryBci) {
-      // only log this once
-      log_state_change(state);
-    }
-    invalidate_osr_method();
-    return;
-  }
-
-  // If the method is already zombie or set to the state we want, nothing to do
-  if (is_zombie() || (state == not_entrant && is_not_entrant())) {
-    return;
+  // If the method is already zombie there is nothing to do
+  if (is_zombie()) {
+    return false;
   }

-  log_state_change(state);
-
   // Make sure the nmethod is not flushed in case of a safepoint in code below.
   nmethodLocker nml(this);

   {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods.
+    if (is_osr_method()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
     // Enter critical section.  Does not block for safepoint.
     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

+    if (flags.state == state) {
+      // another thread already performed this transition so nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
     // The caller can be calling the method statically or through an inline
     // cache call.
-    if (!is_not_entrant()) {
+    if (!is_osr_method() && !is_not_entrant()) {
       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                   SharedRuntime::get_handle_wrong_method_stub());
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
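This refactoring changes `make_not_entrant_or_zombie` from `void` to `bool` so callers learn whether *this* thread won the transition; the `flags.state == state` check under `Patching_lock` is what makes concurrent calls idempotent. A self-contained model of that pattern (simplified to a `std::mutex` and an int state, not HotSpot's actual locking):

```cpp
#include <cassert>
#include <mutex>

enum { in_use = 0, not_entrant = 1, zombie = 2 };

struct NMethodModel {
  int state = in_use;
  std::mutex patching_lock;  // stands in for Patching_lock

  // Returns true if this caller changed the state, false if another
  // caller already performed (or beat us to) the transition.
  bool make_not_entrant_or_zombie(int new_state) {
    if (state == zombie) return false;     // terminal state, nothing to do
    std::lock_guard<std::mutex> pl(patching_lock);
    if (state == new_state) return false;  // lost the race: already done
    state = new_state;                     // win: perform the transition
    return true;                           // ...and log it exactly once
  }
};

int main() {
  NMethodModel nm;
  assert(nm.make_not_entrant_or_zombie(not_entrant) == true);
  assert(nm.make_not_entrant_or_zombie(not_entrant) == false);  // second caller
  assert(nm.make_not_entrant_or_zombie(zombie) == true);
  assert(nm.make_not_entrant_or_zombie(zombie) == false);
  return 0;
}
```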
@@ -1217,6 +1221,10 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
     // Change state
     flags.state = state;
+
+    // Log the transition once
+    log_state_change();
+
   } // leave critical region under Patching_lock

   if (state == not_entrant) {

@@ -1240,7 +1248,6 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
     // It's a true state change, so mark the method as decompiled.
     inc_decompile_count();

-
   // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
   // and it hasn't already been reported for this nmethod then report it now.
   // (the event may have been reported earilier if the GC marked it for unloading).

@@ -1268,7 +1275,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
   // Check whether method got unloaded at a safepoint before this,
   // if so we can skip the flushing steps below
-  if (method() == NULL) return;
+  if (method() == NULL) return true;

   // Remove nmethod from method.
   // We need to check if both the _code and _from_compiled_code_entry_point

@@ -1282,6 +1289,8 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
     HandleMark hm;
     method()->clear_code();
   }
+
+  return true;
 }
src/share/vm/code/nmethod.hpp

@@ -252,7 +252,9 @@ class nmethod : public CodeBlob {
   void* operator new(size_t size, int nmethod_size);

   const char* reloc_string_for(u_char* begin, u_char* end);
-  void make_not_entrant_or_zombie(int state);
+  // Returns true if this thread changed the state of the nmethod or
+  // false if another thread performed the transition.
+  bool make_not_entrant_or_zombie(int state);
   void inc_decompile_count();

   // used to check that writes to nmFlags are done consistently.

@@ -375,10 +377,12 @@ class nmethod : public CodeBlob {
   bool is_zombie() const     { return flags.state == zombie; }
   bool is_unloaded() const   { return flags.state == unloaded; }

-  // Make the nmethod non entrant. The nmethod will continue to be alive.
-  // It is used when an uncommon trap happens.
-  void make_not_entrant()    { make_not_entrant_or_zombie(not_entrant); }
-  void make_zombie()         { make_not_entrant_or_zombie(zombie); }
+  // Make the nmethod non entrant. The nmethod will continue to be
+  // alive.  It is used when an uncommon trap happens.  Returns true
+  // if this thread changed the state of the nmethod or false if
+  // another thread performed the transition.
+  bool make_not_entrant()    { return make_not_entrant_or_zombie(not_entrant); }
+  bool make_zombie()         { return make_not_entrant_or_zombie(zombie); }

   // used by jvmti to track if the unload event has been reported
   bool unload_reported()     { return _unload_reported; }

@@ -563,7 +567,7 @@ class nmethod : public CodeBlob {
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change(int state) const;
+  void log_state_change() const;

   // Prints a comment for one native instruction (reloc info, pc desc)
   void print_code_comment_on(outputStream* st, int column, address begin, address end);