openanolis / dragonwell8_hotspot
Commit af93316f, authored Aug 16, 2011 by twisti
7071653: JSR 292: call site change notification should be pushed not pulled
Reviewed-by: kvn, never, bdelsart
Parent: 46aea118
Showing 27 changed files with 909 additions and 529 deletions (+909 −529)
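The commit title states the design change: when a CallSite's target is updated, the VM should push an invalidation to the compiled methods that depend on it, instead of having compiled code pull (re-read) the target on every dispatch. A toy C++ model of the two styles, to make the trade-off concrete (all names here are illustrative, not HotSpot API):

#include <cstdio>
#include <vector>

struct MethodHandle { const char* name; };

// PULL: compiled code re-reads the target on every call (an indirection
// on the hot path, but no bookkeeping when the target changes).
struct PullCallSite {
  MethodHandle* target;
  void invoke() { printf("pull -> %s\n", target->name); }
};

// PUSH: the target is burned into "compiled" code; set_target notifies
// (invalidates) registered dependents, which is what this commit's
// dependency machinery does with nmethods.
struct PushCallSite {
  MethodHandle* target;
  std::vector<bool*> dependents;  // validity flags of compiled dependents
  void add_dependent(bool* valid) { dependents.push_back(valid); }
  void set_target(MethodHandle* mh) {
    target = mh;
    for (bool* valid : dependents) *valid = false;  // push invalidation
    dependents.clear();
  }
};

int main() {
  MethodHandle a{"a"}, b{"b"};
  PullCallSite pull{&a};
  pull.invoke();                            // pull style: re-read each call
  PushCallSite cs{&a, {}};
  bool compiled_code_valid = true;
  cs.add_dependent(&compiled_code_valid);   // nmethod records its dependency
  cs.set_target(&b);                        // the change pushes a notification
  printf("compiled code valid: %d\n", compiled_code_valid);  // 0 -> deoptimize
  return 0;
}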
src/cpu/sparc/vm/interp_masm_sparc.cpp            +14   -0
src/cpu/sparc/vm/interp_masm_sparc.hpp             +1   -0
src/cpu/sparc/vm/templateTable_sparc.cpp         +158 -120
src/cpu/x86/vm/interp_masm_x86_32.cpp             +15   -1
src/cpu/x86/vm/interp_masm_x86_32.hpp              +1   -0
src/cpu/x86/vm/interp_masm_x86_64.cpp             +17   -1
src/cpu/x86/vm/interp_masm_x86_64.hpp              +3   -5
src/cpu/x86/vm/templateTable_x86_32.cpp          +164 -124
src/cpu/x86/vm/templateTable_x86_64.cpp          +132  -86
src/share/vm/ci/ciCallSite.cpp                    +10   -0
src/share/vm/ci/ciCallSite.hpp                     +4   -0
src/share/vm/ci/ciField.hpp                        +3   -1
src/share/vm/classfile/systemDictionary.cpp        +1   -1
src/share/vm/classfile/systemDictionary.hpp       +14  -11
src/share/vm/classfile/vmSymbols.hpp               +3   -0
src/share/vm/code/dependencies.cpp               +153  -96
src/share/vm/code/dependencies.hpp               +109  -54
src/share/vm/code/nmethod.cpp                      +1   -0
src/share/vm/interpreter/interpreterRuntime.cpp   +18   -3
src/share/vm/interpreter/templateTable.hpp         +2   -2
src/share/vm/memory/universe.cpp                  +32   -1
src/share/vm/memory/universe.hpp                   +1   -0
src/share/vm/oops/instanceKlass.cpp                +1   -1
src/share/vm/opto/callGenerator.cpp               +29   -0
src/share/vm/opto/callGenerator.hpp                +3   -2
src/share/vm/opto/doCall.cpp                      +12  -20
src/share/vm/opto/parse3.cpp                       +8   -0
src/cpu/sparc/vm/interp_masm_sparc.cpp

@@ -758,6 +758,20 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register temp,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
+  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  srl( bytecode, shift_count, bytecode);
+  and3(bytecode,        0xFF, bytecode);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                                int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
...
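The new helper reads the already-rewritten bytecode out of the constant-pool-cache entry's packed "indices" word instead of out of the bytecode stream. A standalone C++ sketch of that extraction follows; the exact bit layout shown (CP index in the low 16 bits, the two rewritten bytecodes in the bytes above it) is an assumption inferred from the shift arithmetic above, not quoted from the patch:

#include <cstdint>
#include <cstdio>

static const int BitsPerByte = 8;

// Mirrors the helper's arithmetic: byte_no (1 or 2) selects which cached
// bytecode to pull out of the packed word; shift then mask to one byte.
uint32_t cached_bytecode(uint32_t indices, int byte_no) {
  const int shift_count = (1 + byte_no) * BitsPerByte;
  return (indices >> shift_count) & 0xFF;
}

int main() {
  // Hypothetical entry: CP index 0x1234, bytecode_1 = 0xB5, bytecode_2 = 0xC3.
  uint32_t indices = (0xC3u << 24) | (0xB5u << 16) | 0x1234u;
  printf("bytecode_1 = 0x%02X\n", cached_bytecode(indices, 1));  // 0xB5
  printf("bytecode_2 = 0x%02X\n", cached_bytecode(indices, 2));  // 0xC3
  return 0;
}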
src/cpu/sparc/vm/interp_masm_sparc.hpp

@@ -189,6 +189,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
                setCCOrNot should_set_CC = dont_set_CC );
 
   void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register temp, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
...
src/cpu/sparc/vm/templateTable_sparc.cpp

@@ -149,36 +149,68 @@ Address TemplateTable::at_bcp(int offset) {
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
-                                   Register Rscratch,
-                                   bool load_bc_into_scratch /*=true*/) {
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
   // With sharing on, may need to test methodOop flag.
-  if (!RewriteBytecodes) return;
-  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
-  Label patch_done;
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ set(bc, bc_reg);
+      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    if (load_bc_into_bc_reg) {
+      __ set(bc, bc_reg);
+    }
+  }
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
-    __ ldub(at_bcp(0), Rscratch);
-    __ cmp_and_br_short(Rscratch, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, fast_patch);
+    Label L_fast_patch;
+    __ ldub(at_bcp(0), temp_reg);
+    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
     // perform the quickening, slowly, in the bowels of the breakpoint table
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
-    __ ba_short(patch_done);
-    __ bind(fast_patch);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
+    __ ba_short(L_patch_done);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
   Bytecodes::Code orig_bytecode =  Bytecodes::java_code(bc);
-  Label okay;
-  __ ldub(at_bcp(0), Rscratch);
-  __ cmp(Rscratch, orig_bytecode);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
-  __ delayed() ->cmp(Rscratch, Rbyte_code);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
+  Label L_okay;
+  __ ldub(at_bcp(0), temp_reg);
+  __ cmp(temp_reg, orig_bytecode);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
+  __ delayed()->cmp(temp_reg, bc_reg);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
   __ delayed()->nop();
-  __ stop("Rewriting wrong bytecode location");
-  __ bind(okay);
+  __ stop("patching the wrong bytecode");
+  __ bind(L_okay);
 #endif
-  __ stb(Rbyte_code, at_bcp(0));
-  __ bind(patch_done);
+
+  // patch bytecode
+  __ stb(bc_reg, at_bcp(0));
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
...
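The new switch is what makes the change notification push-safe on the interpreter side: a putfield whose cached put_code is still zero must not be quickened, so every execution keeps re-entering InterpreterRuntime::resolve_get_put. A standalone sketch of that decision rule (function and variable names are mine; only the zero-check policy comes from the patch):

#include <cstdint>
#include <cstdio>

static const int BitsPerByte = 8;

// Decide whether the interpreter may quicken (patch) a putfield bytecode.
// Per the patch's comment: while the put_code cached for this entry is
// zero, leave the slow bytecode in place so every execution calls back
// into the runtime to do the additional, required work.
bool may_patch_putfield(uint32_t indices, int byte_no) {
  const int shift_count = (1 + byte_no) * BitsPerByte;
  uint32_t put_code = (indices >> shift_count) & 0xFF;
  return put_code != 0;  // zero means: don't patch, keep the slow path
}

int main() {
  printf("%d\n", may_patch_putfield(0x00B51234u, 1));  // 1: patch allowed
  printf("%d\n", may_patch_putfield(0x00001234u, 1));  // 0: keep slow path
  return 0;
}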
@@ -2061,12 +2093,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   // Depends on cpCacheOop layout!
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert_different_registers(result, Rcache);
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
               ConstantPoolCacheEntry::f1_offset(), result);
     __ tst(result);
...
@@ -2075,15 +2107,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
-    __ srl(Lbyte_code, shift_count, Lbyte_code);
-    __ and3(Lbyte_code, 0xFF, Lbyte_code);
-    __ cmp(Lbyte_code, (int)bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
+    __ cmp(Lbyte_code, (int) bytecode());  // have we resolved this bytecode?
     __ br(Assembler::equal, false, Assembler::pt, resolved);
     __ delayed()->set((int)bytecode(), O1);
   }
...
@@ -2618,150 +2644,162 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   if (is_static) {
     // putstatic with object type most likely, check that first
-    __ cmp(Rflags, atos );
+    __ cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, itos );
+    __ delayed()->cmp(Rflags, itos);
 
     // atos
-    __ pop_ptr();
-    __ verify_oop(Otos_i);
-    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
-    __ ba(checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_ptr();
+      __ verify_oop(Otos_i);
+      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
-    // cmp(Rflags, itos );
+    // cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // itos
-    __ pop_i();
-    __ st(Otos_i, Rclass, Roffset);
-    __ ba(checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_i();
+      __ st(Otos_i, Rclass, Roffset);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
   } else {
     // putfield with int type most likely, check that first
-    __ cmp(Rflags, itos );
+    __ cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, atos );
+    __ delayed()->cmp(Rflags, atos);
 
     // itos
-    __ pop_i();
-    pop_and_check_object(Rclass);
-    __ st(Otos_i, Rclass, Roffset);
-    patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
-    __ ba(checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_i();
+      pop_and_check_object(Rclass);
+      __ st(Otos_i, Rclass, Roffset);
+      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
-    // cmp(Rflags, atos );
+    // cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // atos
-    __ pop_ptr();
-    pop_and_check_object(Rclass);
-    __ verify_oop(Otos_i);
-    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
-    patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
-    __ ba(checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_ptr();
+      pop_and_check_object(Rclass);
+      __ verify_oop(Otos_i);
+      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
+      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
   }
 
-  // cmp(Rflags, btos );
+  // cmp(Rflags, btos);
   __ br(Assembler::notEqual, false, Assembler::pt, notByte);
-  __ delayed() ->cmp(Rflags, ltos );
+  __ delayed()->cmp(Rflags, ltos);
 
   // btos
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stb(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stb(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notByte);
-  // cmp(Rflags, ltos );
+  // cmp(Rflags, ltos);
   __ br(Assembler::notEqual, false, Assembler::pt, notLong);
-  __ delayed() ->cmp(Rflags, ctos );
+  __ delayed()->cmp(Rflags, ctos);
 
   // ltos
-  __ pop_l();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ st_long(Otos_l, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_l();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ st_long(Otos_l, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notLong);
-  // cmp(Rflags, ctos );
+  // cmp(Rflags, ctos);
   __ br(Assembler::notEqual, false, Assembler::pt, notChar);
-  __ delayed() ->cmp(Rflags, stos );
+  __ delayed()->cmp(Rflags, stos);
 
   // ctos (char)
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ sth(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ sth(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notChar);
-  // cmp(Rflags, stos );
+  // cmp(Rflags, stos);
   __ br(Assembler::notEqual, false, Assembler::pt, notShort);
-  __ delayed() ->cmp(Rflags, ftos );
+  __ delayed()->cmp(Rflags, ftos);
 
-  // stos (char)
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ sth(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
+  // stos (short)
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ sth(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notShort);
-  // cmp(Rflags, ftos );
+  // cmp(Rflags, ftos);
   __ br(Assembler::notZero, false, Assembler::pt, notFloat);
   __ delayed()->nop();
 
   // ftos
-  __ pop_f();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_f();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notFloat);
 
   // dtos
-  __ pop_d();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_d();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
   }
 
   __ bind(checkVolatile);
...
src/cpu/x86/vm/interp_masm_x86_32.cpp

@@ -233,7 +233,7 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
 
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
...
@@ -241,6 +241,20 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrptr(bytecode, shift_count);
+  andptr(bytecode, 0xFF);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size) {
   assert(cache != tmp, "must use different register");
...
src/cpu/x86/vm/interp_masm_x86_32.hpp

@@ -83,6 +83,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
...
src/cpu/x86/vm/interp_masm_x86_64.cpp

@@ -233,7 +233,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
                                                            int bcp_offset,
                                                            size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
...
@@ -242,6 +242,22 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  // We use a 32-bit load here since the layout of 64-bit words on
+  // little-endian machines allow us that.
+  movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrl(bytecode, shift_count);
+  andl(bytecode, 0xFF);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
                                                                int bcp_offset,
...
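The 32-bit load above relies on little-endian layout: the low four bytes of the 64-bit indices word sit at the word's base address, so a movl reads exactly the bytes the shift/mask arithmetic needs. A small standalone demonstration of that claim (not HotSpot code):

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  // A 64-bit "indices" word whose interesting bytes live in the low half.
  uint64_t wide = 0xDEADBEEF00C3B512ull;

  // On a little-endian machine the low 32 bits are stored first in memory,
  // so a narrow load from the same address sees the same low bytes that
  // (wide >> shift) & 0xFF would select.
  uint32_t narrow;
  std::memcpy(&narrow, &wide, sizeof(narrow));  // emulates the movl

  printf("narrow = 0x%08X\n", narrow);               // 0x00C3B512 on x86
  printf("same?   %d\n", narrow == (uint32_t)wide);  // 1 on little-endian
  return 0;
}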
src/cpu/x86/vm/interp_masm_x86_64.hpp

@@ -100,13 +100,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
-  void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset, size_t index_size = sizeof(u2));
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
   void pop_ptr(Register r = rax);
   void pop_i(Register r = rax);
   void pop_l(Register r = rax);
...
src/cpu/x86/vm/templateTable_x86_32.cpp

@@ -202,45 +202,74 @@ Address TemplateTable::at_bcp(int offset) {
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
-
-  if (!RewriteBytecodes) return;
-  // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    // the pair bytecodes have already done the load.
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
-  __ jccb(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
+  __ jccb(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
...
@@ -2060,24 +2089,20 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int)bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
...
@@ -2453,138 +2478,153 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   __ shrl(flags, ConstantPoolCacheEntry::tosBits);
   assert(btos == 0, "change code, btos != 0");
-  // btos
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
 
-  __ pop(btos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movb(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
+  // btos
+  {
+    __ pop(btos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movb(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notByte);
-  // itos
-  __ cmpl(flags, itos);
+  __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
 
-  __ pop(itos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movl(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
+  // itos
+  {
+    __ pop(itos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movl(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notInt);
-  // atos
-  __ cmpl(flags, atos);
+  __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
 
-  __ pop(atos);
-  if (!is_static) pop_and_check_object(obj);
-  do_oop_store(_masm, lo, rax, _bs->kind(), false);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
+  // atos
+  {
+    __ pop(atos);
+    if (!is_static) pop_and_check_object(obj);
+    do_oop_store(_masm, lo, rax, _bs->kind(), false);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notObj);
-  // ctos
-  __ cmpl(flags, ctos);
+  __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
 
-  __ pop(ctos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
+  // ctos
+  {
+    __ pop(ctos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notChar);
-  // stos
-  __ cmpl(flags, stos);
+  __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
 
-  __ pop(stos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
+  // stos
+  {
+    __ pop(stos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notShort);
-  // ltos
-  __ cmpl(flags, ltos);
+  __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
 
-  Label notVolatileLong;
-  __ testl(rdx, rdx);
-  __ jcc(Assembler::zero, notVolatileLong);
-
-  __ pop(ltos);  // overwrites rdx, do this after testing volatile.
-  if (!is_static) pop_and_check_object(obj);
-
-  // Replace with real volatile test
-  __ push(rdx);
-  __ push(rax);                 // Must update atomically with FIST
-  __ fild_d(Address(rsp,0));    // So load into FPU register
-  __ fistp_d(lo);               // and put into memory atomically
-  __ addptr(rsp, 2*wordSize);
-  // volatile_barrier();
-  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore));
-  // Don't rewrite volatile version
-  __ jmp(notVolatile);
-
-  __ bind(notVolatileLong);
-
-  __ pop(ltos);  // overwrites rdx
-  if (!is_static) pop_and_check_object(obj);
-  NOT_LP64(__ movptr(hi, rdx));
-  __ movptr(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
+  // ltos
+  {
+    Label notVolatileLong;
+    __ testl(rdx, rdx);
+    __ jcc(Assembler::zero, notVolatileLong);
+
+    __ pop(ltos);  // overwrites rdx, do this after testing volatile.
+    if (!is_static) pop_and_check_object(obj);
+
+    // Replace with real volatile test
+    __ push(rdx);
+    __ push(rax);                 // Must update atomically with FIST
+    __ fild_d(Address(rsp,0));    // So load into FPU register
+    __ fistp_d(lo);               // and put into memory atomically
+    __ addptr(rsp, 2*wordSize);
+    // volatile_barrier();
+    volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore));
+    // Don't rewrite volatile version
+    __ jmp(notVolatile);
+
+    __ bind(notVolatileLong);
+
+    __ pop(ltos);  // overwrites rdx
+    if (!is_static) pop_and_check_object(obj);
+    NOT_LP64(__ movptr(hi, rdx));
+    __ movptr(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(notVolatile);
   }
-  __ jmp(notVolatile);
 
   __ bind(notLong);
-  // ftos
-  __ cmpl(flags, ftos);
+  __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
 
-  __ pop(ftos);
-  if (!is_static) pop_and_check_object(obj);
-  __ fstp_s(lo);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
+  // ftos
+  {
+    __ pop(ftos);
+    if (!is_static) pop_and_check_object(obj);
+    __ fstp_s(lo);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notFloat);
 #ifdef ASSERT
-  // dtos
-  __ cmpl(flags, dtos);
+  __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
 #endif
 
-  __ pop(dtos);
-  if (!is_static) pop_and_check_object(obj);
-  __ fstp_d(lo);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
+  // dtos
+  {
+    __ pop(dtos);
+    if (!is_static) pop_and_check_object(obj);
+    __ fstp_d(lo);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
 #ifdef ASSERT
   __ bind(notDouble);
   __ stop("Bad state");
 #endif
 
   __ bind(Done);
...
src/cpu/x86/vm/templateTable_x86_64.cpp

@@ -203,46 +203,74 @@ Address TemplateTable::at_bcp(int offset) {
   return Address(r13, offset);
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
-  if (!RewriteBytecodes) {
-    return;
-  }
-  // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    // the pair bytecodes have already done the load.
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
-  __ jcc(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
+  __ jcc(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
...
@@ -2098,24 +2126,20 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int)bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
...
@@ -2507,101 +2531,123 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   assert(btos == 0, "change code, btos != 0");
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
 
   // btos
-  __ pop(btos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movb(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
+  {
+    __ pop(btos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movb(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notByte);
   __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
 
   // atos
-  __ pop(atos);
-  if (!is_static) pop_and_check_object(obj);
-  // Store into the field
-  do_oop_store(_masm, field, rax, _bs->kind(), false);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
+  {
+    __ pop(atos);
+    if (!is_static) pop_and_check_object(obj);
+    // Store into the field
+    do_oop_store(_masm, field, rax, _bs->kind(), false);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notObj);
   __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
 
   // itos
-  __ pop(itos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movl(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
+  {
+    __ pop(itos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movl(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notInt);
   __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
 
   // ctos
-  __ pop(ctos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
+  {
+    __ pop(ctos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notChar);
   __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
 
   // stos
-  __ pop(stos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
+  {
+    __ pop(stos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notShort);
   __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
 
   // ltos
-  __ pop(ltos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movq(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
+  {
+    __ pop(ltos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movq(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notLong);
   __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
 
   // ftos
-  __ pop(ftos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movflt(field, xmm0);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
+  {
+    __ pop(ftos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movflt(field, xmm0);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notFloat);
 #ifdef ASSERT
   __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
 #endif
 
   // dtos
-  __ pop(dtos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movdbl(field, xmm0);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
+  {
+    __ pop(dtos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movdbl(field, xmm0);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
+    }
   }
 
 #ifdef ASSERT
...
@@ -2612,12 +2658,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
 #endif
 
   __ bind(Done);
 
   // Check for volatile store
   __ testl(rdx, rdx);
   __ jcc(Assembler::zero, notVolatile);
   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore));
   __ bind(notVolatile);
 }
...
src/share/vm/ci/ciCallSite.cpp

@@ -28,6 +28,16 @@
 // ciCallSite
 
+bool ciCallSite::is_constant_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->ConstantCallSite_klass());
+}
+bool ciCallSite::is_mutable_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->MutableCallSite_klass());
+}
+bool ciCallSite::is_volatile_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->VolatileCallSite_klass());
+}
+
 // ------------------------------------------------------------------
 // ciCallSite::get_target
 //
...
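These predicates classify a CallSite by its java.lang.invoke subclass, which is what lets the compiler pick a policy per kind: a ConstantCallSite's target can never change and may be folded, while a MutableCallSite needs the dependency machinery this commit adds. A toy C++ mirror of that subtype test (the class names follow the Java hierarchy; everything else is illustrative, not the ci API):

#include <cstdio>

// Toy mirror of the java.lang.invoke CallSite hierarchy.
struct CallSite            { virtual ~CallSite() {} };
struct ConstantCallSite : CallSite {};
struct MutableCallSite  : CallSite {};
struct VolatileCallSite : CallSite {};

// Analogous to klass()->is_subclass_of(...): a dynamic subtype check.
bool is_constant_call_site(const CallSite* cs) {
  return dynamic_cast<const ConstantCallSite*>(cs) != nullptr;
}

int main() {
  ConstantCallSite ccs;
  MutableCallSite  mcs;
  printf("%d %d\n", is_constant_call_site(&ccs), is_constant_call_site(&mcs));  // 1 0
  return 0;
}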
src/share/vm/ci/ciCallSite.hpp

@@ -37,6 +37,10 @@ public:
   // What kind of ciObject is this?
   bool is_call_site() const { return true; }
 
+  bool is_constant_call_site();
+  bool is_mutable_call_site();
+  bool is_volatile_call_site();
+
   // Return the target MethodHandle of this CallSite.
   ciMethodHandle* get_target() const;
...
src/share/vm/ci/ciField.hpp

 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -178,6 +178,8 @@ public:
   bool is_volatile   () { return flags().is_volatile(); }
   bool is_transient  () { return flags().is_transient(); }
 
+  bool is_call_site_target() { return ((holder() == CURRENT_ENV->CallSite_klass()) && (name() == ciSymbol::target_name())); }
+
   // Debugging output
   void print();
   void print_name_on(outputStream* st);
...
src/share/vm/classfile/systemDictionary.cpp

@@ -1978,7 +1978,7 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
   // JSR 292 classes
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
-  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(CallSite_klass);
+  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);
   initialize_wk_klasses_until(jsr292_group_start, scan, CHECK);
   if (EnableInvokeDynamic) {
     initialize_wk_klasses_through(jsr292_group_end, scan, CHECK);
...
src/share/vm/classfile/systemDictionary.hpp

@@ -144,18 +144,21 @@ class SymbolPropertyTable;
   template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
                                                                               \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
-  template(MethodHandle_klass,           java_lang_invoke_MethodHandle,           Pre_JSR292) \
-  template(MemberName_klass,             java_lang_invoke_MemberName,             Pre_JSR292) \
-  template(MethodHandleNatives_klass,    java_lang_invoke_MethodHandleNatives,    Pre_JSR292) \
-  template(AdapterMethodHandle_klass,    java_lang_invoke_AdapterMethodHandle,    Pre_JSR292) \
-  template(BoundMethodHandle_klass,      java_lang_invoke_BoundMethodHandle,      Pre_JSR292) \
-  template(DirectMethodHandle_klass,     java_lang_invoke_DirectMethodHandle,     Pre_JSR292) \
-  template(MethodType_klass,             java_lang_invoke_MethodType,             Pre_JSR292) \
-  template(MethodTypeForm_klass,         java_lang_invoke_MethodTypeForm,         Pre_JSR292) \
-  template(BootstrapMethodError_klass,   java_lang_BootstrapMethodError,          Pre_JSR292) \
+  template(MethodHandle_klass,             java_lang_invoke_MethodHandle,             Pre_JSR292) \
+  template(MemberName_klass,               java_lang_invoke_MemberName,               Pre_JSR292) \
+  template(MethodHandleNatives_klass,      java_lang_invoke_MethodHandleNatives,      Pre_JSR292) \
+  template(AdapterMethodHandle_klass,      java_lang_invoke_AdapterMethodHandle,      Pre_JSR292) \
+  template(BoundMethodHandle_klass,        java_lang_invoke_BoundMethodHandle,        Pre_JSR292) \
+  template(DirectMethodHandle_klass,       java_lang_invoke_DirectMethodHandle,       Pre_JSR292) \
+  template(MethodType_klass,               java_lang_invoke_MethodType,               Pre_JSR292) \
+  template(MethodTypeForm_klass,           java_lang_invoke_MethodTypeForm,           Pre_JSR292) \
+  template(BootstrapMethodError_klass,     java_lang_BootstrapMethodError,            Pre_JSR292) \
   template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
-  template(CallSite_klass,               java_lang_invoke_CallSite,               Pre_JSR292) \
-  /* Note: MethodHandle must be first, and CallSite last in group */ \
+  template(CallSite_klass,                 java_lang_invoke_CallSite,                 Pre_JSR292) \
+  template(ConstantCallSite_klass,         java_lang_invoke_ConstantCallSite,         Pre_JSR292) \
+  template(MutableCallSite_klass,          java_lang_invoke_MutableCallSite,          Pre_JSR292) \
+  template(VolatileCallSite_klass,         java_lang_invoke_VolatileCallSite,         Pre_JSR292) \
+  /* Note: MethodHandle must be first, and VolatileCallSite last in group */ \
                                                                               \
   template(StringBuffer_klass,           java_lang_StringBuffer,         Pre) \
   template(StringBuilder_klass,          java_lang_StringBuilder,        Pre) \
...
src/share/vm/classfile/vmSymbols.hpp

@@ -233,6 +233,9 @@
   template(java_lang_invoke_InvokeDynamic,            "java/lang/invoke/InvokeDynamic")            \
   template(java_lang_invoke_Linkage,                  "java/lang/invoke/Linkage")                  \
   template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                 \
+  template(java_lang_invoke_ConstantCallSite,         "java/lang/invoke/ConstantCallSite")         \
+  template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")          \
+  template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")         \
   template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")             \
   template(java_lang_invoke_MethodType,               "java/lang/invoke/MethodType")               \
   template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \
...
src/share/vm/code/dependencies.cpp

@@ -113,6 +113,11 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
   assert_common_1(no_finalizable_subclasses, ctxk);
 }
 
+void Dependencies::assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle) {
+  check_ctxk(ctxk);
+  assert_common_3(call_site_target_value, ctxk, call_site, method_handle);
+}
+
 // Helper function.  If we are adding a new dep. under ctxk2,
 // try to find an old dep. under a broader* ctxk1.  If there is
 //
...
@@ -341,7 +346,8 @@ const char* Dependencies::_dep_name[TYPE_LIMIT] = {
   "unique_concrete_method",
   "abstract_with_exclusive_concrete_subtypes_2",
   "exclusive_concrete_methods_2",
-  "no_finalizable_subclasses"
+  "no_finalizable_subclasses",
+  "call_site_target_value"
 };
 
 int Dependencies::_dep_args[TYPE_LIMIT] = {
...
@@ -354,7 +360,8 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
   2, // unique_concrete_method ctxk, m
   3, // unique_concrete_subtypes_2 ctxk, k1, k2
   3, // unique_concrete_methods_2 ctxk, m1, m2
-  1  // no_finalizable_subclasses ctxk
+  1, // no_finalizable_subclasses ctxk
+  3  // call_site_target_value ctxk, call_site, method_handle
 };
 
 const char* Dependencies::dep_name(Dependencies::DepType dept) {
...
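The new dependency type is registered in two parallel arrays indexed by DepType: its printable name and its argument count (three: the context klass, the call site, and the method handle). A small standalone illustration of that enum-indexed parallel-array pattern follows; the names in it are mine, not HotSpot's:

#include <cstdio>

// Enum-indexed parallel arrays, as dependencies.cpp keeps for DepType:
// each dependency type has a printable name and an expected argument count.
enum DepType { no_finalizable_subclasses, call_site_target_value, TYPE_LIMIT };

static const char* dep_name[TYPE_LIMIT] = {
  "no_finalizable_subclasses",
  "call_site_target_value"
};
static const int dep_args[TYPE_LIMIT] = {
  1,  // no_finalizable_subclasses: ctxk
  3   // call_site_target_value:    ctxk, call_site, method_handle
};

int main() {
  DepType d = call_site_target_value;
  printf("%s takes %d argument(s)\n", dep_name[d], dep_args[d]);
  return 0;
}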
@@ -367,6 +374,13 @@ int Dependencies::dep_args(Dependencies::DepType dept) {
   return _dep_args[dept];
 }
 
+void Dependencies::check_valid_dependency_type(DepType dept) {
+  for (int deptv = (int) FIRST_TYPE; deptv < (int) TYPE_LIMIT; deptv++) {
+    if (dept == ((DepType) deptv))  return;
+  }
+  ShouldNotReachHere();
+}
+
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL)  return;
...
@@ -800,11 +814,11 @@ class ClassHierarchyWalker {
                              bool participants_hide_witnesses,
                              bool top_level_call = true);
   // the spot-checking version:
-  klassOop find_witness_in(DepChange& changes,
+  klassOop find_witness_in(KlassDepChange& changes,
                            klassOop context_type,
                            bool participants_hide_witnesses);
  public:
-  klassOop find_witness_subtype(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_subtype(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(doing_subtype_search(), "must set up a subtype search");
     // When looking for unexpected concrete types,
     // do not look beneath expected ones.
...
@@ -817,7 +831,7 @@ class ClassHierarchyWalker {
       return find_witness_anywhere(context_type, participants_hide_witnesses);
     }
   }
-  klassOop find_witness_definer(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_definer(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(!doing_subtype_search(), "must set up a method definer search");
     // When looking for unexpected concrete methods,
     // look beneath expected ones, to see if there are overrides.
...
@@ -878,7 +892,7 @@ static bool count_find_witness_calls() {
 #endif //PRODUCT
 
-klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
+klassOop ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
                                                klassOop context_type,
                                                bool participants_hide_witnesses) {
   assert(changes.involves_context(context_type), "irrelevant dependency");
...
@@ -1137,7 +1151,7 @@ klassOop Dependencies::check_leaf_type(klassOop ctxk) {
 // when dealing with the types of actual instances.
 klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk,
                                                                    klassOop conck,
-                                                                   DepChange* changes) {
+                                                                   KlassDepChange* changes) {
   ClassHierarchyWalker wf(conck);
   return wf.find_witness_subtype(ctxk, changes);
 }
...
@@ -1146,7 +1160,7 @@ klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk
 // instantiatable.  This can allow the compiler to make some paths go
 // dead, if they are gated by a test of the type.
 klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with no participants:
   ClassHierarchyWalker wf;
   return wf.find_witness_subtype(ctxk, changes);
...
@@ -1156,7 +1170,7 @@ klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
 // If a concrete class has no concrete subtypes, it can always be
 // exactly typed.  This allows the use of a cheaper type test.
 klassOop Dependencies::check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with only the ctxk as participant:
   ClassHierarchyWalker wf(ctxk);
   return wf.find_witness_subtype(ctxk, changes);
...
@@ -1217,7 +1231,7 @@ klassOop Dependencies::check_abstract_with_exclusive_concrete_subtypes(
                                                        klassOop ctxk,
                                                        klassOop k1,
                                                        klassOop k2,
-                                                       DepChange* changes) {
+                                                       KlassDepChange* changes) {
   ClassHierarchyWalker wf;
   wf.add_participant(k1);
   wf.add_participant(k2);
...
@@ -1278,7 +1292,7 @@ int Dependencies::find_exclusive_concrete_subtypes(klassOop ctxk,
 // If a class (or interface) has a unique concrete method uniqm, return NULL.
 // Otherwise, return a class that contains an interfering method.
 klassOop Dependencies::check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                                    DepChange* changes) {
+                                                    KlassDepChange* changes) {
   // Here is a missing optimization:  If uniqm->is_final(),
   // we don't really need to search beneath it for overrides.
   // This is probably not important, since we don't use dependencies
...
@@ -1321,7 +1335,7 @@ methodOop Dependencies::find_unique_concrete_method(klassOop ctxk, methodOop m)
 klassOop Dependencies::check_exclusive_concrete_methods(klassOop ctxk,
                                                         methodOop m1,
                                                         methodOop m2,
-                                                        DepChange* changes) {
+                                                        KlassDepChange* changes) {
   ClassHierarchyWalker wf(m1);
   wf.add_participant(m1->method_holder());
   wf.add_participant(m2->method_holder());
...
@@ -1383,7 +1397,7 @@ int Dependencies::find_exclusive_concrete_methods(klassOop ctxk,
 }
 
-klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepChange* changes) {
+klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes) {
   Klass* search_at = ctxk->klass_part();
   if (changes != NULL)
     search_at = changes->new_type()->klass_part();  // just look at the new bit
...
@@ -1395,8 +1409,39 @@ klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepCha
 }
 
-klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
+klassOop Dependencies::check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) {
+  assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
+  assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
+  if (changes == NULL) {
+    // Validate all CallSites
+    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+      return ctxk;  // assertion failed
+  } else {
+    // Validate the given CallSite
+    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
+      assert(method_handle != changes->method_handle(), "must be");
+      return ctxk;  // assertion failed
+    }
+  }
+  assert(java_lang_invoke_CallSite::target(call_site) == method_handle, "should still be valid");
+  return NULL;  // assertion still valid
+}
+
+void Dependencies::DepStream::trace_and_log_witness(klassOop witness) {
+  if (witness != NULL) {
+    if (TraceDependencies) {
+      print_dependency(witness, /*verbose=*/ true);
+    }
+    // The following is a no-op unless logging is enabled:
+    log_dependency(witness);
+  }
+}
+
+klassOop Dependencies::DepStream::check_klass_dependency(KlassDepChange* changes) {
   assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
 
   klassOop witness = NULL;
   switch (type()) {
...
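check_call_site_target_value is the heart of the push model: when a CallSite's target is set, only nmethods that recorded a dependency on that exact (call site, target) pair fail the check and get deoptimized; with a NULL change object the same routine revalidates every recorded pair. A standalone sketch of that validation rule (types reduced to plain pointers; the structure follows the function above, the rest is illustrative):

#include <cassert>
#include <cstdio>

struct MethodHandle {};
struct CallSite { MethodHandle* target; };

// A pushed change notification: which site, and its new target.
struct CallSiteChange { CallSite* call_site; MethodHandle* method_handle; };

// Returns true when the recorded dependency "call_site's target is
// method_handle" no longer holds, i.e. the dependent nmethod must go.
bool dependency_fails(CallSite* call_site, MethodHandle* method_handle,
                      const CallSiteChange* changes) {
  if (changes == nullptr) {
    // Validate all call sites against their recorded targets.
    return call_site->target != method_handle;
  }
  // Validate only the site being changed right now (the "push").
  if (call_site == changes->call_site && call_site->target != changes->method_handle) {
    assert(method_handle != changes->method_handle && "must be");
    return true;  // assertion failed: invalidate the dependent nmethod
  }
  return false;
}

int main() {
  MethodHandle oldT, newT;
  CallSite cs{&oldT};
  // Compiled code recorded the dependency (cs, oldT).
  printf("%d\n", dependency_fails(&cs, &oldT, nullptr));  // 0: still valid
  CallSiteChange change{&cs, &newT};                      // setTarget(newT) pushed
  printf("%d\n", dependency_fails(&cs, &oldT, &change));  // 1: deoptimize
  return 0;
}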
@@ -1407,95 +1452,103 @@ klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
witness
=
check_leaf_type
(
context_type
());
break
;
case
abstract_with_unique_concrete_subtype
:
witness
=
check_abstract_with_unique_concrete_subtype
(
context_type
(),
type_argument
(
1
),
changes
);
witness
=
check_abstract_with_unique_concrete_subtype
(
context_type
(),
type_argument
(
1
),
changes
);
break
;
case
abstract_with_no_concrete_subtype
:
witness
=
check_abstract_with_no_concrete_subtype
(
context_type
(),
changes
);
witness
=
check_abstract_with_no_concrete_subtype
(
context_type
(),
changes
);
break
;
case
concrete_with_no_concrete_subtype
:
witness
=
check_concrete_with_no_concrete_subtype
(
context_type
(),
changes
);
witness
=
check_concrete_with_no_concrete_subtype
(
context_type
(),
changes
);
break
;
case
unique_concrete_method
:
witness
=
check_unique_concrete_method
(
context_type
(),
method_argument
(
1
),
changes
);
witness
=
check_unique_concrete_method
(
context_type
(),
method_argument
(
1
),
changes
);
break
;
case
abstract_with_exclusive_concrete_subtypes_2
:
witness
=
check_abstract_with_exclusive_concrete_subtypes
(
context_type
(),
type_argument
(
1
),
type_argument
(
2
),
changes
);
witness
=
check_abstract_with_exclusive_concrete_subtypes
(
context_type
(),
type_argument
(
1
),
type_argument
(
2
),
changes
);
break
;
case
exclusive_concrete_methods_2
:
witness
=
check_exclusive_concrete_methods
(
context_type
(),
method_argument
(
1
),
method_argument
(
2
),
changes
);
witness
=
check_exclusive_concrete_methods
(
context_type
(),
method_argument
(
1
),
method_argument
(
2
),
changes
);
break
;
case
no_finalizable_subclasses
:
witness
=
check_has_no_finalizable_subclasses
(
context_type
(),
changes
);
witness
=
check_has_no_finalizable_subclasses
(
context_type
(),
changes
);
break
;
default:
default:
witness
=
NULL
;
ShouldNotReachHere
();
break
;
}
if
(
witness
!=
NULL
)
{
if
(
TraceDependencies
)
{
print_dependency
(
witness
,
/*verbose=*/
true
);
}
// The following is a no-op unless logging is enabled:
log_dependency
(
witness
);
}
trace_and_log_witness
(
witness
);
return
witness
;
}
-klassOop Dependencies::DepStream::spot_check_dependency_at(DepChange& changes) {
-  if (!changes.involves_context(context_type()))
-    // irrelevant dependency; skip it
-    return NULL;
-
-  return check_dependency_impl(&changes);
-}
-
-void DepChange::initialize() {
-  // entire transaction must be under this lock:
-  assert_lock_strong(Compile_lock);
-
-  // Mark all dependee and all its superclasses
-  // Mark transitive interfaces
-  for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
-    instanceKlass::cast(d)->set_is_marked_dependent(true);
-  }
-}
-
-DepChange::~DepChange() {
-  // Unmark all dependee and all its superclasses
-  // Unmark transitive interfaces
-  for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    instanceKlass::cast(d)->set_is_marked_dependent(false);
-  }
-}
-
-bool DepChange::involves_context(klassOop k) {
-  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
-    return false;
-  }
-  instanceKlass* ik = instanceKlass::cast(k);
-  bool is_contained = ik->is_marked_dependent();
-  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
-         "correct marking of potential context types");
-  return is_contained;
-}
+klassOop Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange* changes) {
+  assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
+
+  klassOop witness = NULL;
+  switch (type()) {
+  case call_site_target_value:
+    witness = check_call_site_target_value(context_type(), argument(1), argument(2), changes);
+    break;
+  default:
+    witness = NULL;
+    break;
+  }
+  trace_and_log_witness(witness);
+  return witness;
+}
+
+klassOop Dependencies::DepStream::spot_check_dependency_at(DepChange& changes) {
+  // Handle klass dependency
+  if (changes.is_klass_change() && changes.as_klass_change()->involves_context(context_type()))
+    return check_klass_dependency(changes.as_klass_change());
+
+  // Handle CallSite dependency
+  if (changes.is_call_site_change())
+    return check_call_site_dependency(changes.as_call_site_change());
+
+  // irrelevant dependency; skip it
+  return NULL;
+}
+
+void DepChange::print() {
+  int nsup = 0, nint = 0;
+  for (ContextStream str(*this); str.next(); ) {
+    klassOop k = str.klass();
+    switch (str.change_type()) {
+    case Change_new_type:
+      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
+      break;
+    case Change_new_sub:
+      if (!WizardMode) {
+        ++nsup;
+      } else {
+        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    case Change_new_impl:
+      if (!WizardMode) {
+        ++nint;
+      } else {
+        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    }
+  }
+  if (nsup + nint != 0) {
+    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
+  }
+}
+
+void DepChange::ContextStream::start() {
+  klassOop new_type = _changes.is_klass_change() ? _changes.as_klass_change()->new_type() : (klassOop) NULL;
+  _change_type = (new_type == NULL ? NO_CHANGE : Start_Klass);
+  _klass = new_type;
+  _ti_base = NULL;
+  _ti_index = 0;
+  _ti_limit = 0;
+}
 
 bool DepChange::ContextStream::next() {
...
...
@@ -1534,35 +1587,39 @@ bool DepChange::ContextStream::next() {
   return false;
 }
 
-void DepChange::print() {
-  int nsup = 0, nint = 0;
+void KlassDepChange::initialize() {
+  // entire transaction must be under this lock:
+  assert_lock_strong(Compile_lock);
+
+  // Mark all dependee and all its superclasses
+  // Mark transitive interfaces
   for (ContextStream str(*this); str.next(); ) {
-    klassOop k = str.klass();
-    switch (str.change_type()) {
-    case Change_new_type:
-      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
-      break;
-    case Change_new_sub:
-      if (!WizardMode) {
-        ++nsup;
-      } else {
-        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    case Change_new_impl:
-      if (!WizardMode) {
-        ++nint;
-      } else {
-        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    }
+    klassOop d = str.klass();
+    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
+    instanceKlass::cast(d)->set_is_marked_dependent(true);
   }
-  if (nsup + nint != 0) {
-    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
+}
+
+KlassDepChange::~KlassDepChange() {
+  // Unmark all dependee and all its superclasses
+  // Unmark transitive interfaces
+  for (ContextStream str(*this); str.next(); ) {
+    klassOop d = str.klass();
+    instanceKlass::cast(d)->set_is_marked_dependent(false);
   }
 }
 
+bool KlassDepChange::involves_context(klassOop k) {
+  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
+    return false;
+  }
+  instanceKlass* ik = instanceKlass::cast(k);
+  bool is_contained = ik->is_marked_dependent();
+  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
+         "correct marking of potential context types");
+  return is_contained;
+}
+
 #ifndef PRODUCT
 void Dependencies::print_statistics() {
   if (deps_find_witness_print != 0) {
...
...
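The core of the new validation logic above is Dependencies::check_call_site_target_value: called with no change event it re-validates the recorded (call site, method handle) pair wholesale, and called with a CallSiteDepChange it can only fail for the one site that was rebound. Below is a minimal standalone sketch of that contract; the types and the main() driver are hypothetical, not HotSpot code.

// Simplified model: a checker returns NULL (no witness) while the assertion
// "call_site.target == expected" holds, and returns the offending call site
// as a witness when the assertion fails.
#include <cstdio>

struct MethodHandle { int id; };

struct CallSite { MethodHandle* target; };

// Stands in for CallSiteDepChange: the site being rebound and its new target.
struct CallSiteChange {
  CallSite*     call_site;
  MethodHandle* method_handle;
};

const CallSite* check_call_site_target(const CallSite* cs,
                                       const MethodHandle* expected,
                                       const CallSiteChange* changes) {
  if (changes == nullptr) {
    // Validate the recorded assertion against the current state.
    if (cs->target != expected) return cs;   // witness: assertion failed
  } else {
    // Validate only against the given change event.
    if (cs == changes->call_site && cs->target != changes->method_handle)
      return cs;                             // witness: assertion failed
  }
  return nullptr;                            // assertion still valid
}

int main() {
  MethodHandle mh1{1}, mh2{2};
  CallSite cs{&mh1};
  std::printf("valid: %s\n", check_call_site_target(&cs, &mh1, nullptr) ? "no" : "yes");
  cs.target = &mh2;  // someone rebound the call site
  std::printf("valid: %s\n", check_call_site_target(&cs, &mh1, nullptr) ? "no" : "yes");
}

The two-mode design keeps bulk re-validation (e.g. after a safepoint) and targeted invalidation on a single rebind behind one entry point.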
src/share/vm/code/dependencies.hpp
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -25,18 +25,21 @@
 #ifndef SHARE_VM_CODE_DEPENDENCIES_HPP
 #define SHARE_VM_CODE_DEPENDENCIES_HPP
 
+#include "ci/ciCallSite.hpp"
 #include "ci/ciKlass.hpp"
+#include "ci/ciMethodHandle.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "utilities/growableArray.hpp"
 
 //** Dependencies represent assertions (approximate invariants) within
-// the class hierarchy.  An example is an assertion that a given
-// method is not overridden; another example is that a type has only
-// one concrete subtype.  Compiled code which relies on such
-// assertions must be discarded if they are overturned by changes in
-// the class hierarchy.  We can think of these assertions as
-// approximate invariants, because we expect them to be overturned
+// the runtime system, e.g. class hierarchy changes.  An example is an
+// assertion that a given method is not overridden; another example is
+// that a type has only one concrete subtype.  Compiled code which
+// relies on such assertions must be discarded if they are overturned
+// by changes in the runtime system.  We can think of these assertions
+// as approximate invariants, because we expect them to be overturned
 // very infrequently.  We are willing to perform expensive recovery
 // operations when they are overturned.  The benefit, of course, is
 // performing optimistic optimizations (!) on the object code.
...
...
@@ -52,6 +55,8 @@ class OopRecorder;
 class xmlStream;
 class CompileLog;
 class DepChange;
+class KlassDepChange;
+class CallSiteDepChange;
 class No_Safepoint_Verifier;
 
 class Dependencies: public ResourceObj {
...
...
@@ -152,6 +157,9 @@ class Dependencies: public ResourceObj {
     // subclasses require finalization registration.
     no_finalizable_subclasses,
 
+    // This dependency asserts when the CallSite.target value changed.
+    call_site_target_value,
+
     TYPE_LIMIT
   };
 
   enum {
...
...
@@ -179,6 +187,7 @@ class Dependencies: public ResourceObj {
static
int
dep_context_arg
(
DepType
dept
)
{
return
dept_in_mask
(
dept
,
ctxk_types
)
?
0
:
-
1
;
}
static
void
check_valid_dependency_type
(
DepType
dept
);
private:
// State for writing a new set of dependencies:
...
...
@@ -255,6 +264,7 @@ class Dependencies: public ResourceObj {
   void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
   void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
   void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
+  void assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle);
 
   // Define whether a given method or type is concrete.
   // These methods define the term "concrete" as used in this module.
...
...
@@ -296,19 +306,19 @@ class Dependencies: public ResourceObj {
   static klassOop check_evol_method(methodOop m);
   static klassOop check_leaf_type(klassOop ctxk);
   static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
-                                                              DepChange* changes = NULL);
+                                                              KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                               DepChange* changes = NULL);
+                                               KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
-                                                                  DepChange* changes = NULL);
+                                                                  KlassDepChange* changes = NULL);
   static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
-                                                   DepChange* changes = NULL);
-  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, DepChange* changes = NULL);
+                                                   KlassDepChange* changes = NULL);
+  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
+  static klassOop check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
   // A returned klassOop is NULL if the dependency assertion is still
   // valid.  A non-NULL klassOop is a 'witness' to the assertion
   // failure, a point in the class hierarchy where the assertion has
...
...
@@ -415,7 +425,10 @@ class Dependencies: public ResourceObj {
inline
oop
recorded_oop_at
(
int
i
);
// => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)
klassOop
check_dependency_impl
(
DepChange
*
changes
);
klassOop
check_klass_dependency
(
KlassDepChange
*
changes
);
klassOop
check_call_site_dependency
(
CallSiteDepChange
*
changes
);
void
trace_and_log_witness
(
klassOop
witness
);
public:
DepStream
(
Dependencies
*
deps
)
...
...
@@ -453,10 +466,13 @@ class Dependencies: public ResourceObj {
       return (klassOop) x;
     }
 
-    // The point of the whole exercise:  Is this dep is still OK?
+    // The point of the whole exercise:  Is this dep still OK?
     klassOop check_dependency() {
-      return check_dependency_impl(NULL);
+      klassOop result = check_klass_dependency(NULL);
+      if (result != NULL)  return result;
+      return check_call_site_dependency(NULL);
     }
+
     // A lighter version:  Checks only around recent changes in a class
     // hierarchy.  (See Universe::flush_dependents_on.)
     klassOop spot_check_dependency_at(DepChange& changes);
...
...
@@ -472,12 +488,26 @@ class Dependencies: public ResourceObj {
static
void
print_statistics
()
PRODUCT_RETURN
;
};
// A class hierarchy change coming through the VM (under the Compile_lock).
// The change is structured as a single new type with any number of supers
// and implemented interface types. Other than the new type, any of the
// super types can be context types for a relevant dependency, which the
// new type could invalidate.
// Every particular DepChange is a sub-class of this class.
class
DepChange
:
public
StackObj
{
public:
// What kind of DepChange is this?
virtual
bool
is_klass_change
()
const
{
return
false
;
}
virtual
bool
is_call_site_change
()
const
{
return
false
;
}
// Subclass casting with assertions.
KlassDepChange
*
as_klass_change
()
{
assert
(
is_klass_change
(),
"bad cast"
);
return
(
KlassDepChange
*
)
this
;
}
CallSiteDepChange
*
as_call_site_change
()
{
assert
(
is_call_site_change
(),
"bad cast"
);
return
(
CallSiteDepChange
*
)
this
;
}
void
print
();
public:
enum
ChangeType
{
NO_CHANGE
=
0
,
// an uninvolved klass
...
...
@@ -488,28 +518,6 @@ class DepChange : public StackObj {
     Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
   };
 
- private:
-  // each change set is rooted in exactly one new type (at present):
-  KlassHandle _new_type;
-
-  void initialize();
-
- public:
-  // notes the new type, marks it and all its super-types
-  DepChange(KlassHandle new_type) : _new_type(new_type) {
-    initialize();
-  }
-
-  // cleans up the marks
-  ~DepChange();
-
-  klassOop new_type() { return _new_type(); }
-
-  // involves_context(k) is true if k is new_type or any of the super types
-  bool involves_context(klassOop k);
-
   // Usage:
   //  for (DepChange::ContextStream str(changes); str.next(); ) {
   //    klassOop k = str.klass();
...
...
@@ -530,14 +538,7 @@ class DepChange : public StackObj {
     int                  _ti_limit;
 
     // start at the beginning:
-    void start() {
-      klassOop new_type = _changes.new_type();
-      _change_type = (new_type == NULL ? NO_CHANGE : Start_Klass);
-      _klass = new_type;
-      _ti_base = NULL;
-      _ti_index = 0;
-      _ti_limit = 0;
-    }
+    void start();
 
    public:
     ContextStream(DepChange& changes)
...
...
@@ -555,8 +556,62 @@ class DepChange : public StackObj {
     klassOop klass() { return _klass; }
   };
   friend class DepChange::ContextStream;
+};
 
-  void print();
-};
+
+// A class hierarchy change coming through the VM (under the Compile_lock).
+// The change is structured as a single new type with any number of supers
+// and implemented interface types.  Other than the new type, any of the
+// super types can be context types for a relevant dependency, which the
+// new type could invalidate.
+class KlassDepChange : public DepChange {
+ private:
+  // each change set is rooted in exactly one new type (at present):
+  KlassHandle _new_type;
+
+  void initialize();
+
+ public:
+  // notes the new type, marks it and all its super-types
+  KlassDepChange(KlassHandle new_type) : _new_type(new_type) {
+    initialize();
+  }
+
+  // cleans up the marks
+  ~KlassDepChange();
+
+  // What kind of DepChange is this?
+  virtual bool is_klass_change() const { return true; }
+
+  klassOop new_type() { return _new_type(); }
+
+  // involves_context(k) is true if k is new_type or any of the super types
+  bool involves_context(klassOop k);
+};
+
+
+// A CallSite has changed its target.
+class CallSiteDepChange : public DepChange {
+ private:
+  Handle _call_site;
+  Handle _method_handle;
+
+ public:
+  CallSiteDepChange(Handle call_site, Handle method_handle)
+    : _call_site(call_site),
+      _method_handle(method_handle)
+  {
+    assert(_call_site()    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(_method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+  }
+
+  // What kind of DepChange is this?
+  virtual bool is_call_site_change() const { return true; }
+
+  oop call_site()     const { return _call_site();     }
+  oop method_handle() const { return _method_handle(); }
 };
 
 #endif // SHARE_VM_CODE_DEPENDENCIES_HPP
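The header now splits DepChange into KlassDepChange and CallSiteDepChange, using virtual kind tests plus asserted downcasts rather than RTTI, so dispatch code can route each event to the checker that understands it. A simplified, self-contained model of that pattern (only the class names come from the patch; everything else is a hypothetical stand-in):

#include <cassert>
#include <cstdio>

class KlassDepChange;
class CallSiteDepChange;

class DepChange {
 public:
  virtual ~DepChange() {}
  // What kind of DepChange is this?
  virtual bool is_klass_change()     const { return false; }
  virtual bool is_call_site_change() const { return false; }

  // Subclass casting with assertions, as in the patch.
  KlassDepChange* as_klass_change() {
    assert(is_klass_change() && "bad cast");
    return (KlassDepChange*) this;
  }
  CallSiteDepChange* as_call_site_change() {
    assert(is_call_site_change() && "bad cast");
    return (CallSiteDepChange*) this;
  }
};

class KlassDepChange : public DepChange {
 public:
  virtual bool is_klass_change() const { return true; }
};

class CallSiteDepChange : public DepChange {
 public:
  virtual bool is_call_site_change() const { return true; }
};

// A dispatcher in the style of DepStream::spot_check_dependency_at:
// klass events go to the klass checker, call-site events to the other.
void spot_check(DepChange& changes) {
  if (changes.is_klass_change())     { std::puts("check klass dependencies");     return; }
  if (changes.is_call_site_change()) { std::puts("check call site dependencies"); return; }
  std::puts("irrelevant change; skip");
}

int main() {
  KlassDepChange k;
  CallSiteDepChange c;
  spot_check(k);
  spot_check(c);
}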
src/share/vm/code/nmethod.cpp
...
...
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
+#include "code/dependencies.hpp"
 #include "code/nmethod.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/abstractCompiler.hpp"
...
...
src/share/vm/interpreter/interpreterRuntime.cpp
...
...
@@ -509,6 +509,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
   // resolve field
   FieldAccessInfo info;
   constantPoolHandle pool(thread, method(thread)->constants());
+  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
...
...
@@ -528,8 +529,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
   // exceptions at the correct place. If we do not resolve completely
   // in the current pass, leaving the put_code set to zero will
   // cause the next put instruction to reresolve.
-  bool is_put = (bytecode == Bytecodes::_putfield ||
-                 bytecode == Bytecodes::_putstatic);
   Bytecodes::Code put_code = (Bytecodes::Code)0;
 
   // We also need to delay resolving getstatic instructions until the
...
...
@@ -541,7 +540,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
                                      !klass->is_initialized());
-
   Bytecodes::Code get_code = (Bytecodes::Code)0;
 
   if (!uninitialized_static) {
     get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
     if (is_put || !info.access_flags().is_final()) {
...
...
@@ -549,6 +547,23 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
     }
   }
 
+  if (is_put && !is_static && klass->is_subclass_of(SystemDictionary::CallSite_klass()) && (info.name() == vmSymbols::target_name())) {
+    const jint direction = frame::interpreter_frame_expression_stack_direction();
+    oop call_site     = *((oop*) thread->last_frame().interpreter_frame_tos_at(-1 * direction));
+    oop method_handle = *((oop*) thread->last_frame().interpreter_frame_tos_at( 0 * direction));
+    assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+
+    {
+      // Walk all nmethods depending on CallSite
+      MutexLocker mu(Compile_lock, thread);
+      Universe::flush_dependents_on(call_site, method_handle);
+    }
+
+    // Don't allow fast path for setting CallSite.target and sub-classes.
+    put_code = (Bytecodes::Code) 0;
+  }
+
   cache_entry(thread)->set_field(
     get_code,
     put_code,
...
...
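The interpreterRuntime.cpp hunk above is where the notification becomes "push": when a putfield resolves to CallSite.target, the runtime flushes dependent nmethods at the moment of the write, and it leaves put_code at zero so the fast path never caches the store and every such write keeps coming back through this slow path. A toy sketch of the idea follows; it uses plain C++ with no VM types, and the Listener stands in for a dependent nmethod.

#include <cstdio>
#include <vector>

struct MethodHandle { int id; };

struct Listener { void (*on_target_change)(int site_id); };

struct CallSite {
  int id;
  MethodHandle* target;
  std::vector<Listener> dependents;  // stands in for dependent nmethods

  void set_target(MethodHandle* mh) {
    target = mh;
    // Push model: the write itself notifies every dependent,
    // instead of compiled code polling the target on each call.
    for (const Listener& l : dependents) l.on_target_change(id);
  }
};

int main() {
  MethodHandle a{1}, b{2};
  CallSite cs{7, &a, {}};
  cs.dependents.push_back({[](int id) {
    std::printf("deoptimize code compiled against call site %d\n", id);
  }});
  cs.set_target(&b);  // rebinding drives invalidation directly
}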
src/share/vm/interpreter/templateTable.hpp
...
...
@@ -120,8 +120,8 @@ class TemplateTable: AllStatic {
   // helpers
   static void unimplemented_bc();
-  static void patch_bytecode(Bytecodes::Code bc, Register scratch1,
-                             Register scratch2, bool load_bc_in_scratch = true);
+  static void patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                             Register temp_reg, bool load_bc_into_bc_reg = true, int byte_no = -1);
 
   // C calls
   static void call_VM(Register oop_result, address entry_point);
...
...
src/share/vm/memory/universe.cpp
...
...
@@ -1177,7 +1177,7 @@ void Universe::flush_dependents_on(instanceKlassHandle dependee) {
   // stopped dring the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
-  DepChange changes(dependee);
+  KlassDepChange changes(dependee);
 
   // Compute the dependent nmethods
   if (CodeCache::mark_for_deoptimization(changes) > 0) {
...
...
@@ -1187,6 +1187,37 @@ void Universe::flush_dependents_on(instanceKlassHandle dependee) {
   }
 }
 
+// Flushes compiled methods dependent on a particular CallSite
+// instance when its target is different than the given MethodHandle.
+void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
+  assert_lock_strong(Compile_lock);
+
+  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
+
+  // CodeCache can only be updated by a thread_in_VM and they will all be
+  // stopped dring the safepoint so CodeCache will be safe to update without
+  // holding the CodeCache_lock.
+
+  CallSiteDepChange changes(call_site(), method_handle());
+
+  // Compute the dependent nmethods that have a reference to a
+  // CallSite object.  We use instanceKlass::mark_dependent_nmethod
+  // directly instead of CodeCache::mark_for_deoptimization because we
+  // want dependents on the class CallSite only not all classes in the
+  // ContextStream.
+  int marked = 0;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    instanceKlass* call_site_klass = instanceKlass::cast(SystemDictionary::CallSite_klass());
+    marked = call_site_klass->mark_dependent_nmethods(changes);
+  }
+
+  if (marked > 0) {
+    // At least one nmethod has been marked for deoptimization
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+}
+
 #ifdef HOTSWAP
 // Flushes compiled methods dependent on dependee in the evolutionary sense
 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
...
...
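The new Universe::flush_dependents_on(call_site, method_handle) above is deliberately narrow: it walks only the nmethods that registered a dependency against the CallSite class, marks them, and runs a single deoptimization pass if anything was marked. A stripped-down sketch of that two-phase mark-then-deoptimize flow, with hypothetical structures in place of the code cache and VM operations:

#include <cstdio>
#include <vector>

struct NMethod {
  bool depends_on_call_sites;     // did this nmethod record a CallSite dependency?
  bool marked_for_deopt = false;
};

// Phase 1: mark only the relevant dependents, return how many were hit.
int mark_dependents(std::vector<NMethod>& code_cache) {
  int marked = 0;
  for (NMethod& nm : code_cache) {
    if (nm.depends_on_call_sites) { nm.marked_for_deopt = true; ++marked; }
  }
  return marked;
}

int main() {
  std::vector<NMethod> code_cache = { {true}, {false}, {true} };
  int marked = mark_dependents(code_cache);
  if (marked > 0) {
    // Phase 2: a single deoptimization pass, only if something was marked.
    std::printf("deoptimizing %d nmethods\n", marked);
  }
}

Marking first and deoptimizing once keeps the expensive VM operation off the common path where no compiled code depends on the rebound site.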
src/share/vm/memory/universe.hpp
...
...
@@ -439,6 +439,7 @@ class Universe: AllStatic {
 
   // Flushing and deoptimization
   static void flush_dependents_on(instanceKlassHandle dependee);
+  static void flush_dependents_on(Handle call_site, Handle method_handle);
 #ifdef HOTSWAP
   // Flushing and deoptimization in case of evolution
   static void flush_evol_dependents_on(instanceKlassHandle dependee);
...
...
src/share/vm/oops/instanceKlass.cpp
...
...
@@ -1406,7 +1406,7 @@ class nmethodBucket {
 //
 // Walk the list of dependent nmethods searching for nmethods which
-// are dependent on the klassOop that was passed in and mark them for
+// are dependent on the changes that were passed in and mark them for
 // deoptimization.  Returns the number of nmethods found.
 //
 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
...
...
src/share/vm/opto/callGenerator.cpp
...
...
@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "ci/bcEscapeAnalyzer.hpp"
+#include "ci/ciCallSite.hpp"
 #include "ci/ciCPCache.hpp"
 #include "ci/ciMethodHandle.hpp"
 #include "classfile/javaClasses.hpp"
...
...
@@ -738,6 +739,34 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
 }
 
+CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
+                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
+  ciMethodHandle* method_handle = call_site->get_target();
+
+  // Set the callee to have access to the class and signature in the
+  // MethodHandleCompiler.
+  method_handle->set_callee(callee);
+  method_handle->set_caller(caller);
+  method_handle->set_call_profile(profile);
+
+  // Get an adapter for the MethodHandle.
+  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+  if (target_method != NULL) {
+    Compile *C = Compile::current();
+    CallGenerator* hit_cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+    if (hit_cg != NULL && hit_cg->is_inline()) {
+      // Add a dependence for invalidation of the optimization.
+      if (call_site->is_mutable_call_site()) {
+        C->dependencies()->assert_call_site_target_value(C->env()->CallSite_klass(), call_site, method_handle);
+      }
+      return hit_cg;
+    }
+  }
+  return NULL;
+}
+
 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
...
...
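for_invokedynamic_inline is the compile-time half of the contract: inline straight through the call site's current target, and for a mutable site record a call_site_target_value dependency so that a later rebind can find and discard exactly this code. A hedged sketch of the registration step, with hypothetical types standing in for C2's real dependency bookkeeping:

#include <cstdio>
#include <utility>
#include <vector>

struct CallSite { int id; bool is_mutable; int target; };

struct Dependencies {
  // recorded (call site id, target observed at compile time) pairs
  std::vector<std::pair<int, int>> call_site_targets;
  void assert_call_site_target_value(const CallSite& cs) {
    call_site_targets.push_back({cs.id, cs.target});
  }
};

bool inline_through(const CallSite& cs, Dependencies& deps) {
  // Constant call sites can never be rebound, so no dependency is needed;
  // mutable ones must be guarded by a recorded assertion.
  if (cs.is_mutable) deps.assert_call_site_target_value(cs);
  return true;  // pretend the inline succeeded
}

int main() {
  Dependencies deps;
  CallSite cs{42, /*is_mutable=*/true, /*target=*/7};
  inline_through(cs, deps);
  std::printf("recorded %zu call-site dependencies\n", deps.call_site_targets.size());
}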
src/share/vm/opto/callGenerator.hpp
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -111,7 +111,8 @@ class CallGenerator : public ResourceObj {
   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
-  static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_method_handle_inline(Node* method_handle,   JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
 
   // How to generate a replace a direct call with an inline version
   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
...
...
src/share/vm/opto/doCall.cpp
...
...
@@ -114,7 +114,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
if
(
cg
!=
NULL
)
return
cg
;
}
// Do
MethodH
andle calls.
// Do
method h
andle calls.
// NOTE: This must happen before normal inlining logic below since
// MethodHandle.invoke* are native methods which obviously don't
// have bytecodes and so normal inlining fails.
...
...
@@ -127,33 +127,25 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
       if (cg != NULL) {
         return cg;
       }
       return CallGenerator::for_direct_call(call_method);
     } else {
-      // Get the MethodHandle from the CallSite.
+      // Get the CallSite object.
       ciMethod* caller_method = jvms->method();
       ciBytecodeStream str(caller_method);
       str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-      ciCallSite*     call_site     = str.get_call_site();
-      ciMethodHandle* method_handle = call_site->get_target();
-
-      // Set the callee to have access to the class and signature in
-      // the MethodHandleCompiler.
-      method_handle->set_callee(call_method);
-      method_handle->set_caller(caller);
-      method_handle->set_call_profile(profile);
-
-      // Get an adapter for the MethodHandle.
-      ciMethod* target_method = method_handle->get_invokedynamic_adapter();
-      if (target_method != NULL) {
-        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-        if (hit_cg != NULL && hit_cg->is_inline()) {
-          CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
-          return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+      ciCallSite* call_site = str.get_call_site();
+
+      // Inline constant and mutable call sites.  We don't inline
+      // volatile call sites optimistically since they are specified
+      // to change their value often and that would result in a lot of
+      // deoptimizations and recompiles.
+      if (call_site->is_constant_call_site() || call_site->is_mutable_call_site()) {
+        CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
+        if (cg != NULL) {
+          return cg;
         }
       }
 
       // If something failed, generate a normal dynamic call.
       return CallGenerator::for_dynamic_call(call_method);
     }
...
...
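The doCall.cpp rewrite reduces the inlining decision to the kind of call site: constant and mutable sites are inlined optimistically, volatile sites never are, since they are specified to change their value often and inlining them would mean constant deoptimization and recompilation. A small sketch of that policy, under the simplifying assumption that each site is exactly one of the three kinds:

#include <cstdio>

enum class CallSiteKind { Constant, Mutable, Volatile };

bool should_inline(CallSiteKind kind) {
  switch (kind) {
    case CallSiteKind::Constant:
    case CallSiteKind::Mutable:
      return true;   // optimistic: rebinds are expected to be rare,
                     // and a recorded dependency covers the mutable case
    case CallSiteKind::Volatile:
      return false;  // frequent retargeting; fall back to a normal
                     // dynamic call instead of deopt churn
  }
  return false;
}

int main() {
  std::printf("constant: %d\n", should_inline(CallSiteKind::Constant));
  std::printf("mutable:  %d\n", should_inline(CallSiteKind::Mutable));
  std::printf("volatile: %d\n", should_inline(CallSiteKind::Volatile));
}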
src/share/vm/opto/parse3.cpp
...
...
@@ -100,6 +100,14 @@ void Parse::do_field_access(bool is_get, bool is_field) {
     }
   }
 
+  // Deoptimize on putfield writes to CallSite.target
+  if (!is_get && field->is_call_site_target()) {
+    uncommon_trap(Deoptimization::Reason_unhandled,
+                  Deoptimization::Action_reinterpret,
+                  NULL, "put to CallSite.target field");
+    return;
+  }
+
   assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");
 
   // Note:  We do not check for an unloaded field type here any more.
...
...