openanolis / dragonwell8_hotspot

Commit 6b2e7283
Authored Oct 08, 2014 by asaha
Merge; parent commits: 22a99b3e, e7245a36

Showing 36 changed files with 616 additions and 349 deletions (+616, -349)
.hgtags                                                             +2    -0
make/aix/makefiles/fastdebug.make                                   +0    -1
make/hotspot_version                                                +1    -1
src/cpu/ppc/vm/assembler_ppc.hpp                                    +112  -5
src/cpu/ppc/vm/assembler_ppc.inline.hpp                             +52   -0
src/cpu/ppc/vm/globalDefinitions_ppc.hpp                            +2    -0
src/cpu/ppc/vm/interp_masm_ppc_64.cpp                               +0    -1
src/cpu/ppc/vm/interpreter_ppc.cpp                                  +0    -1
src/cpu/ppc/vm/macroAssembler_ppc.cpp                               +6    -8
src/cpu/ppc/vm/ppc.ad                                               +33   -25
src/cpu/ppc/vm/stubGenerator_ppc.cpp                                +1    -5
src/cpu/ppc/vm/templateTable_ppc_64.cpp                             +0    -4
src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp               +1    -1
src/share/vm/classfile/javaClasses.cpp                              +1    -0
src/share/vm/classfile/systemDictionary.cpp                         +8    -6
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp            +1    -3
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp               +11   -17
src/share/vm/gc_implementation/g1/g1EvacFailure.hpp                 +9    -16
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp                +6    -10
src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp          +11   -16
src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp   +0    -14
src/share/vm/gc_implementation/g1/g1RemSet.cpp                      +6    -86
src/share/vm/gc_implementation/g1/g1RemSet.hpp                      +0    -14
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp               +0    -9
src/share/vm/gc_implementation/g1/g1_globals.hpp                    +0    -3
src/share/vm/interpreter/oopMapCache.cpp                            +5    -5
src/share/vm/interpreter/oopMapCache.hpp                            +16   -16
src/share/vm/oops/arrayKlass.cpp                                    +1    -1
src/share/vm/runtime/arguments.cpp                                  +6    -0
src/share/vm/runtime/deoptimization.hpp                             +1    -1
src/share/vm/runtime/vframe.cpp                                     +125  -74
src/share/vm/runtime/vframe.hpp                                     +1    -1
src/share/vm/services/mallocTracker.cpp                             +2    -5
test/gc/arguments/TestG1ConcRefinementThreads.java                  +97   -0
test/runtime/LoadClass/ShowClassLoader.java                         +45   -0
test/runtime/NMT/UnsafeMallocLimit2.java                            +54   -0
.hgtags
@@ -522,6 +522,8 @@ c9635cad4a5d794a96b4a26d3e7ad1d783133add hs25.40-b09
 b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
 5c1b5be2c69bcae610a790e9438da446c61d3361 hs25.40-b12
 905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08
+d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
+7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
make/aix/makefiles/fastdebug.make
@@ -67,7 +67,6 @@ MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
 # not justified.
 LFLAGS_QIPA =
 
-G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=12
+HS_BUILD_NUMBER=13
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
src/cpu/ppc/vm/assembler_ppc.hpp
@@ -268,8 +268,35 @@ class Assembler : public AbstractAssembler {
     ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
 
-    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
-    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+    // Special purpose registers
+    MTSPR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1),
+    MFSPR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1),
+
+    MTXER_OPCODE  = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+    MFXER_OPCODE  = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+
+    MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+    MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+
+    MTLR_OPCODE   = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+
+    MTCTR_OPCODE  = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+
+    MTTFHAR_OPCODE   = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MFTFHAR_OPCODE   = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MTTFIAR_OPCODE   = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MFTFIAR_OPCODE   = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MTTEXASR_OPCODE  = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MFTEXASR_OPCODE  = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+    MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+
+    MTVRSAVE_OPCODE  = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+    MFVRSAVE_OPCODE  = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+
+    MFTB_OPCODE   = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT),
 
     MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
     MFCR_OPCODE   = (31u << OPCODE_SHIFT |  19u << 1),
@@ -291,9 +318,6 @@ class Assembler : public AbstractAssembler {
     // CTR-related opcodes
     BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
 
-    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
-    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
-
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
@@ -585,6 +609,37 @@ class Assembler : public AbstractAssembler {
     MTVSCR_OPCODE  = (4u << OPCODE_SHIFT | 1604u),
     MFVSCR_OPCODE  = (4u << OPCODE_SHIFT | 1540u),
 
+    // AES (introduced with Power 8)
+    VCIPHER_OPCODE      = (4u << OPCODE_SHIFT | 1288u),
+    VCIPHERLAST_OPCODE  = (4u << OPCODE_SHIFT | 1289u),
+    VNCIPHER_OPCODE     = (4u << OPCODE_SHIFT | 1352u),
+    VNCIPHERLAST_OPCODE = (4u << OPCODE_SHIFT | 1353u),
+    VSBOX_OPCODE        = (4u << OPCODE_SHIFT | 1480u),
+
+    // SHA (introduced with Power 8)
+    VSHASIGMAD_OPCODE   = (4u << OPCODE_SHIFT | 1730u),
+    VSHASIGMAW_OPCODE   = (4u << OPCODE_SHIFT | 1666u),
+
+    // Vector Binary Polynomial Multiplication (introduced with Power 8)
+    VPMSUMB_OPCODE      = (4u << OPCODE_SHIFT | 1032u),
+    VPMSUMD_OPCODE      = (4u << OPCODE_SHIFT | 1224u),
+    VPMSUMH_OPCODE      = (4u << OPCODE_SHIFT | 1096u),
+    VPMSUMW_OPCODE      = (4u << OPCODE_SHIFT | 1160u),
+
+    // Vector Permute and Xor (introduced with Power 8)
+    VPERMXOR_OPCODE     = (4u << OPCODE_SHIFT |   45u),
+
+    // Transactional Memory instructions (introduced with Power 8)
+    TBEGIN_OPCODE    = (31u << OPCODE_SHIFT | 654u << 1),
+    TEND_OPCODE      = (31u << OPCODE_SHIFT | 686u << 1),
+    TABORT_OPCODE    = (31u << OPCODE_SHIFT | 910u << 1),
+    TABORTWC_OPCODE  = (31u << OPCODE_SHIFT | 782u << 1),
+    TABORTWCI_OPCODE = (31u << OPCODE_SHIFT | 846u << 1),
+    TABORTDC_OPCODE  = (31u << OPCODE_SHIFT | 814u << 1),
+    TABORTDCI_OPCODE = (31u << OPCODE_SHIFT | 878u << 1),
+    TSR_OPCODE       = (31u << OPCODE_SHIFT | 750u << 1),
+    TCHECK_OPCODE    = (31u << OPCODE_SHIFT | 718u << 1),
+
     // Icache and dcache related instructions
     DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
     DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
@@ -1420,6 +1475,25 @@ class Assembler : public AbstractAssembler {
   inline void mcrf( ConditionRegister crd, ConditionRegister cra);
   inline void mtcr( Register s);
 
+  // Special purpose registers
+  // Exception Register
+  inline void mtxer(Register s1);
+  inline void mfxer(Register d);
+  // Vector Register Save Register
+  inline void mtvrsave(Register s1);
+  inline void mfvrsave(Register d);
+  // Timebase
+  inline void mftb(Register d);
+  // Introduced with Power 8:
+  // Data Stream Control Register
+  inline void mtdscr(Register s1);
+  inline void mfdscr(Register d);
+  // Transactional Memory Registers
+  inline void mftfhar(Register d);
+  inline void mftfiar(Register d);
+  inline void mftexasr(Register d);
+  inline void mftexasru(Register d);
+
   // PPC 1, section 2.4.1 Branch Instructions
   inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
   inline void b(  Label& L);
@@ -1860,6 +1934,39 @@ class Assembler : public AbstractAssembler {
   inline void mtvscr(   VectorRegister b);
   inline void mfvscr(   VectorRegister d);
 
+  // AES (introduced with Power 8)
+  inline void vcipher(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipher(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsbox(       VectorRegister d, VectorRegister a);
+
+  // SHA (introduced with Power 8)
+  // Not yet implemented.
+
+  // Vector Binary Polynomial Multiplication (introduced with Power 8)
+  inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b);
+
+  // Vector Permute and Xor (introduced with Power 8)
+  inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+
+  // Transactional Memory instructions (introduced with Power 8)
+  inline void tbegin_();    // R=0
+  inline void tbeginrot_(); // R=1 Rollback-Only Transaction
+  inline void tend_();      // A=0
+  inline void tendall_();   // A=1
+  inline void tabort_(Register a);
+  inline void tabortwc_(int t, Register a, Register b);
+  inline void tabortwci_(int t, Register a, int si);
+  inline void tabortdc_(int t, Register a, Register b);
+  inline void tabortdci_(int t, Register a, int si);
+  inline void tsuspend_();  // tsr with L=0
+  inline void tresume_();   // tsr with L=1
+  inline void tcheck(int f);
+
   // The following encoders use r0 as second operand. These instructions
   // read r0 as '0'.
   inline void lwzx( Register d, Register s2);
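The new special-purpose-register opcodes above are all derived from the generic MTSPR/MFSPR encodings by OR-ing the (split) SPR number into the instruction word, instead of spelling every constant out in full as the old MTLR/MFLR/MTCTR/MFCTR definitions did. Below is a minimal standalone sketch of that composition; the shift values (OPCODE_SHIFT == 26, SPR_0_4_SHIFT == 16) are assumptions matching the usual PPC field layout and are not quoted from this diff.

// Standalone sketch (not HotSpot code): how MTLR/MTCTR fall out of MTSPR.
// Assumed field positions: primary opcode in the top 6 bits (shift 26),
// low half of the SPR number at shift 16.
#include <cstdint>
#include <cstdio>

int main() {
  const unsigned OPCODE_SHIFT  = 26;  // assumption
  const unsigned SPR_0_4_SHIFT = 16;  // assumption

  const uint32_t MTSPR_OPCODE = 31u << OPCODE_SHIFT | 467u << 1;
  const uint32_t MTLR_OPCODE  = MTSPR_OPCODE | 8 << SPR_0_4_SHIFT;  // LR  is SPR 8
  const uint32_t MTCTR_OPCODE = MTSPR_OPCODE | 9 << SPR_0_4_SHIFT;  // CTR is SPR 9

  // Same bit patterns the old, fully spelled-out constants produced.
  printf("MTLR  = 0x%08x\n", MTLR_OPCODE);   // == 31u << 26 | 467u << 1 | 8 << 16
  printf("MTCTR = 0x%08x\n", MTCTR_OPCODE);  // == 31u << 26 | 467u << 1 | 9 << 16
  return 0;
}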
src/cpu/ppc/vm/assembler_ppc.inline.hpp
@@ -312,6 +312,25 @@ inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
   { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
 inline void Assembler::mtcr( Register s) { Assembler::mtcrf(0xff, s); }
 
+// Special purpose registers
+// Exception Register
+inline void Assembler::mtxer(Register s1)         { emit_int32(MTXER_OPCODE     | rs(s1)); }
+inline void Assembler::mfxer(Register d)          { emit_int32(MFXER_OPCODE     | rt(d)); }
+// Vector Register Save Register
+inline void Assembler::mtvrsave(Register s1)      { emit_int32(MTVRSAVE_OPCODE  | rs(s1)); }
+inline void Assembler::mfvrsave(Register d)       { emit_int32(MFVRSAVE_OPCODE  | rt(d)); }
+// Timebase
+inline void Assembler::mftb(Register d)           { emit_int32(MFTB_OPCODE      | rt(d)); }
+// Introduced with Power 8:
+// Data Stream Control Register
+inline void Assembler::mtdscr(Register s1)        { emit_int32(MTDSCR_OPCODE    | rs(s1)); }
+inline void Assembler::mfdscr(Register d)         { emit_int32(MFDSCR_OPCODE    | rt(d)); }
+// Transactional Memory Registers
+inline void Assembler::mftfhar(Register d)        { emit_int32(MFTFHAR_OPCODE   | rt(d)); }
+inline void Assembler::mftfiar(Register d)        { emit_int32(MFTFIAR_OPCODE   | rt(d)); }
+inline void Assembler::mftexasr(Register d)       { emit_int32(MFTEXASR_OPCODE  | rt(d)); }
+inline void Assembler::mftexasru(Register d)      { emit_int32(MFTEXASRU_OPCODE | rt(d)); }
+
 // SAP JVM 2006-02-13 PPC branch instruction.
 // PPC 1, section 2.4.1 Branch Instructions
 inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE | li(disp(intptr_t(a), intptr_t(pc()))) | aa(0) | lk(0), rt); }
@@ -735,6 +754,39 @@ inline void Assembler::vsrah( VectorRegister d, VectorRegister a, VectorRegist
 inline void Assembler::mtvscr(  VectorRegister b)  { emit_int32( MTVSCR_OPCODE | vrb(b)); }
 inline void Assembler::mfvscr(  VectorRegister d)  { emit_int32( MFVSCR_OPCODE | vrt(d)); }
 
+// AES (introduced with Power 8)
+inline void Assembler::vcipher(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipher(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
+
+// SHA (introduced with Power 8)
+// Not yet implemented.
+
+// Vector Binary Polynomial Multiplication (introduced with Power 8)
+inline void Assembler::vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumd( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+
+// Vector Permute and Xor (introduced with Power 8)
+inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+
+// Transactional Memory instructions (introduced with Power 8)
+inline void Assembler::tbegin_()    { emit_int32( TBEGIN_OPCODE | rc(1)); }
+inline void Assembler::tbeginrot_() { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tend_()      { emit_int32( TEND_OPCODE | rc(1)); }
+inline void Assembler::tendall_()   { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); }
+inline void Assembler::tabort_(Register a)                      { emit_int32( TABORT_OPCODE    | ra(a) | rc(1)); }
+inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE  | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortwci_(int t, Register a, int si)    { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE  | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortdci_(int t, Register a, int si)    { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tsuspend_()  { emit_int32( TSR_OPCODE | rc(1)); }
+inline void Assembler::tresume_()   { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tcheck(int f){ emit_int32( TCHECK_OPCODE | bf(f)); }
+
 // ra0 version
 inline void Assembler::lwzx( Register d, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32(LWZ_OPCODE  | rt(d) | d1(si16));}
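The transactional-memory encoders above set single instruction bits with expressions like 1u << (31 - 10), because the PPC architecture numbers bits from the most significant end (bit 0 is the MSB of the 32-bit word). The sketch below is illustrative only and not part of the diff; it just shows how an IBM-style bit index maps onto a conventional shift, using the R/L and A field positions visible in the code above.

// Illustrative only: IBM bit numbering (bit 0 = MSB) vs. shift amounts.
#include <cstdint>
#include <cassert>

// Set bit `n` of a 32-bit instruction word, counting from the MSB.
static inline uint32_t ppc_bit(unsigned n) {
  return 1u << (31 - n);
}

int main() {
  // tbeginrot_ / tresume_ set bit 10 (the R / L field) -> 1u << 21.
  assert(ppc_bit(10) == (1u << 21));
  // tendall_ sets bit 6 (the A field) -> 1u << 25.
  assert(ppc_bit(6) == (1u << 25));
  return 0;
}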
src/cpu/ppc/vm/globalDefinitions_ppc.hpp
@@ -37,6 +37,8 @@ const int StackAlignmentInBytes = 16;
 // signatures accordingly.
 const bool CCallingConventionRequiresIntsAsLongs = true;
 
+#define SUPPORTS_NATIVE_CX8
+
 // The PPC CPUs are NOT multiple-copy-atomic.
 #define CPU_NOT_MULTIPLE_COPY_ATOMIC
src/cpu/ppc/vm/interp_masm_ppc_64.cpp
@@ -25,7 +25,6 @@
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interp_masm_ppc_64.hpp"
 #include "interpreter/interpreterRuntime.hpp"
src/cpu/ppc/vm/interpreter_ppc.cpp
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
src/cpu/ppc/vm/macroAssembler_ppc.cpp
@@ -2365,7 +2365,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
 #endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
-// in frame_ppc64.hpp.
+// in frame_ppc.hpp.
 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
   // Always set last_Java_pc and flags first because once last_Java_sp
   // is visible has_last_Java_frame is true and users will look at the
@@ -2492,6 +2492,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
 }
 
 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
   if (src == noreg) src = dst;
   Register shifted_src = src;
   if (Universe::narrow_klass_shift() != 0 ||
@@ -2526,14 +2527,11 @@ void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src)
 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
   if (Universe::heap() != NULL) {
-    if (Universe::narrow_oop_base() == NULL) {
-      Assembler::xorr(R30, R30, R30);
-    } else {
-      load_const(R30, Universe::narrow_ptrs_base(), tmp);
-    }
+    load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
   } else {
-    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
-    ld(R30, 0, R30);
+    // Heap not yet allocated. Load indirectly.
+    int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
+    ld(R30, simm16_offset, R30);
   }
 }
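The rewritten reinit_heapbase() captures the return value of load_const_optimized() (called with an extra true argument) and feeds it to ld() as a displacement, which suggests the helper materializes only the high part of the address and reports the signed low 16 bits back to the caller. A small arithmetic sketch of that hi/lo split follows; it illustrates the addressing math only, is not code from this commit, and its variable names are made up.

// Illustration of the hi/lo split behind "simm16_offset" + ld(R30, simm16_offset, R30).
#include <cstdint>
#include <cassert>

int main() {
  int64_t addr = 0x00007f1234569abcLL;             // some example address

  // Low 16 bits interpreted as a signed displacement (what ld's D field holds).
  int16_t simm16 = static_cast<int16_t>(addr & 0xffff);
  // The "high" part that the constant-load sequence must materialize in the base register.
  int64_t base = addr - simm16;

  // base + sign-extended displacement reproduces the original address,
  // which is why ld(R30, simm16_offset, R30) ends up reading from `addr`.
  assert(base + simm16 == addr);
  return 0;
}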
src/cpu/ppc/vm/ppc.ad
@@ -1249,6 +1249,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
   // Emit the trampoline stub which will be related to the branch-and-link below.
   CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+  if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
   __ relocate(rtype);
 }
@@ -1412,7 +1413,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
     while (bang_offset <= bang_end) {
       // Need at least one stack bang at end of shadow zone.
 
-      // Again I had to copy code, this time from assembler_ppc64.cpp,
+      // Again I had to copy code, this time from assembler_ppc.cpp,
       // bang_stack_with_offset - see there for comments.
 
       // Stack grows down, caller passes positive offset.
@@ -2002,7 +2003,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   // Inline_cache contains a klass.
   Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
-  Register receiver_klass = R0;  // tmp
+  Register receiver_klass = R12_scratch2;  // tmp
 
   assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
   assert(R11_scratch1 == R11, "need prologue scratch register");
@@ -3486,6 +3487,7 @@ encode %{
     // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+    if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
     __ relocate(_optimized_virtual ?
                 relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
   }
@@ -3529,6 +3531,7 @@ encode %{
     // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+    if (ra_->C->env()->failing()) { return; } // Code cache may be full.
     assert(_optimized_virtual, "methodHandle call should be a virtual call");
     __ relocate(relocInfo::opt_virtual_call_type);
   }
@@ -3579,9 +3582,7 @@ encode %{
     const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
     const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
-
-    if (ra_->C->env()->failing())
-      return;
+    if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 
     // Build relocation at call site with ic position as data.
     assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
@@ -5640,19 +5641,6 @@ instruct loadNKlass(iRegNdst dst, memory mem) %{
   ins_pipe(pipe_class_memory);
 %}
 
-//// Load compressed klass and decode it if narrow_klass_shift == 0.
-//// TODO: will narrow_klass_shift ever be 0?
-//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
-//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
-//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*);
-//  ins_cost(MEMORY_REF_COST);
-//
-//  format %{ "LWZ     $dst, $mem \t// DecodeNKlass (unscaled)" %}
-//  size(4);
-//  ins_encode( enc_lwz(dst, mem) );
-//  ins_pipe(pipe_class_memory);
-//%}
-
 // Load Klass Pointer
 instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadKlass mem));
@@ -6072,11 +6060,15 @@ instruct loadConN_Ex(iRegNdst dst, immN src) %{
   %}
 %}
 
-instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
+// We have seen a safepoint between the hi and lo parts, and this node was handled
+// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
+// not a narrow oop.
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
+  match(Set dst src);
   effect(DEF dst, USE src);
   ins_cost(DEFAULT_COST);
 
-  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  format %{ "LIS     $dst, $src \t// narrow klass hi" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_addis);
@@ -6086,6 +6078,21 @@ instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
   ins_pipe(pipe_class_default);
 %}
 
+// As loadConNKlass_hi this must be recognized as narrow klass, not oop!
+instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
+  match(Set dst src1);
+  effect(TEMP src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MASK    $dst, $src2, 0xFFFFFFFF" %} // mask
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src2$$Register, 0x20);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
@@ -6093,10 +6100,10 @@ instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
   effect(TEMP src2);
   ins_cost(DEFAULT_COST);
 
-  format %{ "ADDI    $dst, $src1, $src2 \t// narrow oop lo" %}
+  format %{ "ORI     $dst, $src1, $src2 \t// narrow klass lo" %}
   size(4);
   ins_encode %{
-    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
     intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
     assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
     int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
@@ -6127,10 +6134,11 @@ instruct loadConNKlass_Ex(iRegNdst dst, immNKlass src) %{
     MachNode *m2 = m1;
     if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
       // Value might be 1-extended. Mask out these bits.
-      m2 = new (C) clearMs32bNode();
+      m2 = new (C) loadConNKlass_maskNode();
       m2->add_req(NULL, m1);
       m2->_opnds[0] = op_dst;
-      m2->_opnds[1] = op_dst;
+      m2->_opnds[1] = op_src;
+      m2->_opnds[2] = op_dst;
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       nodes->push(m2);
     }
@@ -6975,7 +6983,7 @@ instruct encodePKlass_32GAligned(iRegNdst dst, iRegPsrc src) %{
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
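The new loadConNKlass_mask node exists because the LIS in loadConNKlass_hi sign-extends its immediate: if bit 31 of the encoded klass value is set (the is_uimm(..., 31) check fails), the upper 32 bits of the register end up all ones and must be cleared by the clrldi before the ORI supplies the low half. A tiny sketch of that "1-extension" effect, purely illustrative and not from this commit:

// Illustrative only: why a constant with bit 31 set needs the extra mask step.
#include <cstdint>
#include <cassert>

int main() {
  uint32_t narrow_klass = 0x80001234u;            // bit 31 set, so not a uimm31

  // LIS-style materialization: the upper 16 bits are loaded as a *signed* value.
  int64_t after_lis = static_cast<int64_t>(static_cast<int32_t>(narrow_klass & 0xffff0000u));
  assert((after_lis >> 32) == -1);                // upper 32 bits are 1-extended

  // clrldi ..., 0x20 clears the top 32 bits, then ORI adds the low half.
  uint64_t masked = static_cast<uint64_t>(after_lis) & 0xffffffffu;
  uint64_t result = masked | (narrow_klass & 0xffffu);
  assert(result == narrow_klass);
  return 0;
}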
src/cpu/ppc/vm/stubGenerator_ppc.cpp
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
@@ -39,9 +38,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/top.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
 #include "runtime/thread.inline.hpp"
 
 #define __ _masm->
@@ -216,7 +212,7 @@ class StubGenerator: public StubCodeGenerator {
     {
       BLOCK_COMMENT("Call frame manager or native entry.");
       // Call frame manager or native entry.
-      Register r_new_arg_entry = R14; // PPC_state;
+      Register r_new_arg_entry = R14;
       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                  r_arg_method, r_arg_thread);
src/cpu/ppc/vm/templateTable_ppc_64.cpp
@@ -352,7 +352,6 @@ void TemplateTable::ldc(bool wide) {
   __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
   __ bne(CCR0, notInt);
-  __ isync(); // Order load of constant wrt. tags.
   __ lwax(R17_tos, Rcpool, Rscratch1);
   __ push(itos);
   __ b(exit);
@@ -364,7 +363,6 @@ void TemplateTable::ldc(bool wide) {
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
   __ asm_assert_eq("unexpected type", 0x8765);
 #endif
-  __ isync(); // Order load of constant wrt. tags.
   __ lfsx(F15_ftos, Rcpool, Rscratch1);
   __ push(ftos);
@@ -423,13 +421,11 @@ void TemplateTable::ldc2_w() {
   // Check out Conversions.java for an example.
   // Also ConstantPool::header_size() is 20, which makes it very difficult
   // to double-align double on the constant pool. SG, 11/7/97
-  __ isync(); // Order load of constant wrt. tags.
   __ lfdx(F15_ftos, Rcpool, Rindex);
   __ push(dtos);
   __ b(Lexit);
 
   __ bind(Llong);
-  __ isync(); // Order load of constant wrt. tags.
   __ ldx(R17_tos, Rcpool, Rindex);
   __ push(ltos);
src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp
@@ -47,4 +47,4 @@ inline void Prefetch::write(void *loc, intx interval) {
     );
 }
 
-#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
+#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
src/share/vm/classfile/javaClasses.cpp
@@ -625,6 +625,7 @@ void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader,
   }
 
   // set the classLoader field in the java_lang_Class instance
+  assert(class_loader() == k->class_loader(), "should be same");
   set_class_loader(mirror(), class_loader());
 
   // Setup indirection from klass->mirror last
src/share/vm/classfile/systemDictionary.cpp
@@ -2276,12 +2276,14 @@ methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid
     spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
     m = Method::make_method_handle_intrinsic(iid, signature, CHECK_(empty));
-    CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
-                                  methodHandle(), CompileThreshold, "MH", CHECK_(empty));
-
-    // Check if we need to have compiled code but we don't.
-    if (!Arguments::is_interpreter_only() && !m->has_compiled_code()) {
-      THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
-                 "out of space in CodeCache for method handle intrinsic", empty);
+    if (!Arguments::is_interpreter_only()) {
+      // Generate a compiled form of the MH intrinsic.
+      AdapterHandlerLibrary::create_native_wrapper(m);
+      // Check if we need to have compiled code but we don't.
+      if (!m->has_compiled_code()) {
+        THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
+                   "out of space in CodeCache for method handle intrinsic", empty);
+      }
     }
   }
 
   // Now grab the lock.  We might have to throw away the new method,
   // if a racing thread has managed to install one at the same time.
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -128,9 +128,7 @@ void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
 }
 
 uint ConcurrentG1Refine::thread_num() {
-  uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
-                                                 : ParallelGCThreads;
-  return MAX2<uint>(n_threads, 1);
+  return G1ConcRefinementThreads;
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1483,9 +1483,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
       // Discard all rset updates
       JavaThread::dirty_card_queue_set().abandon_logs();
-      assert(!G1DeferredRSUpdate
-             || (G1DeferredRSUpdate &&
-                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
+      assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 
       _young_list->reset_sampled_info();
       // At this point there should be no regions in the
@@ -2100,15 +2098,13 @@ jint G1CollectedHeap::initialize() {
                                                concurrent_g1_refine()->red_zone(),
                                                Shared_DirtyCardQ_lock);
 
-  if (G1DeferredRSUpdate) {
-    dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
-                                      DirtyCardQ_CBL_mon,
-                                      DirtyCardQ_FL_lock,
-                                      -1, // never trigger processing
-                                      -1, // no limit on length
-                                      Shared_DirtyCardQ_lock,
-                                      &JavaThread::dirty_card_queue_set());
-  }
+  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
+                                    DirtyCardQ_CBL_mon,
+                                    DirtyCardQ_FL_lock,
+                                    -1, // never trigger processing
+                                    -1, // no limit on length
+                                    Shared_DirtyCardQ_lock,
+                                    &JavaThread::dirty_card_queue_set());
 
   // Initialize the card queue set used to hold cards containing
   // references into the collection set.
@@ -5303,7 +5299,6 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
-  guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
   double redirty_logged_cards_start = os::elapsedTime();
 
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
@@ -5358,9 +5353,10 @@ public:
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop(oop* p) {
     oop obj = *p;
+    assert(obj != NULL, "the caller should have filtered out NULL values");
 
     G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
-    if (obj == NULL || cset_state == G1CollectedHeap::InNeither) {
+    if (cset_state == G1CollectedHeap::InNeither) {
       return;
     }
     if (cset_state == G1CollectedHeap::InCSet) {
@@ -5962,9 +5958,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   // RSets.
   enqueue_discovered_references(n_workers);
 
-  if (G1DeferredRSUpdate) {
-    redirty_logged_cards();
-  }
+  redirty_logged_cards();
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
src/share/vm/gc_implementation/g1/g1EvacFailure.hpp
@@ -176,15 +176,17 @@ public:
 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
-  OopsInHeapRegionClosure *_update_rset_cl;
   uint _worker_id;
 
+  DirtyCardQueue _dcq;
+  UpdateRSetDeferred _update_rset_cl;
+
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl,
                                 uint worker_id) :
-    _g1h(g1h), _update_rset_cl(update_rset_cl),
+    _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
     _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -195,7 +197,7 @@ public:
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
       if (hr->evacuation_failed()) {
-        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
+        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                             during_initial_mark,
                                             during_conc_mark,
                                             _worker_id);
@@ -214,7 +216,7 @@ public:
         // whenever this might be required in the future.
         hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
-        _update_rset_cl->set_region(hr);
+        _update_rset_cl.set_region(hr);
         hr->object_iterate(&rspc);
 
         hr->rem_set()->clean_strong_code_roots(hr);
@@ -238,16 +240,7 @@ public:
     _g1h(g1h) { }
 
   void work(uint worker_id) {
-    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
-    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
-    UpdateRSetDeferred deferred_update(_g1h, &dcq);
-
-    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
-    if (!G1DeferredRSUpdate) {
-      update_rset_cl = &immediate_update;
-    }
-
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -236,10 +236,8 @@ void G1GCPhaseTimes::note_gc_end() {
   _last_gc_worker_times_ms.verify();
   _last_gc_worker_other_times_ms.verify();
 
-  if (G1DeferredRSUpdate) {
-    _last_redirty_logged_cards_time_ms.verify();
-    _last_redirty_logged_cards_processed_cards.verify();
-  }
+  _last_redirty_logged_cards_time_ms.verify();
+  _last_redirty_logged_cards_processed_cards.verify();
 }
 
 void G1GCPhaseTimes::note_string_dedup_fixup_start() {
@@ -351,12 +349,10 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
                                     _recorded_non_young_cset_choice_time_ms));
   print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
-  if (G1DeferredRSUpdate) {
-    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
-    if (G1Log::finest()) {
-      _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
-      _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
-    }
+  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  if (G1Log::finest()) {
+    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
+    _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
   }
   if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
     print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
@@ -82,20 +82,6 @@ class G1ParScanThreadState : public StackObj {
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
-  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
-
-  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
  public:
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
@@ -115,8 +101,17 @@ class G1ParScanThreadState : public StackObj {
     _refs->push(ref);
   }
 
-  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
+  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+    // If the new value of the field points to the same region or
+    // is the to-space, we don't need to include it in the Rset updates.
+    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
 
  public:
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
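update_rs() turns an updated reference field into a card index via ctbs()->index_for(p) and only enqueues the card the first time mark_card_deferred() succeeds. The sketch below spells out the usual card arithmetic behind index_for/byte_for_index; it is illustrative only, the addresses are invented, and the 512-byte card size is an assumption (HotSpot's customary default), not something stated in this diff.

// Illustrative card-table arithmetic (assumed 512-byte cards, shift of 9).
#include <cstdint>
#include <cstddef>
#include <cassert>

int main() {
  const uintptr_t heap_base  = 0x700000000000ULL;  // made-up heap start
  const unsigned  card_shift = 9;                  // log2(512), assumed default

  uintptr_t field_addr = heap_base + 0x12345;      // address of the updated oop field

  // index_for(p): which card the field lives on.
  size_t card_index = (field_addr - heap_base) >> card_shift;

  // byte_for_index(card_index): that card's entry in the card table,
  // i.e. what gets pushed on the DirtyCardQueue once mark_card_deferred succeeds.
  uintptr_t card_table_base = 0x7f0000000000ULL;   // made-up card table start
  uintptr_t card_entry = card_table_base + card_index;

  assert(card_index == (0x12345 >> 9));            // == 0x91
  assert(card_entry - card_table_base == card_index);
  return 0;
}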
src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
@@ -29,20 +29,6 @@
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
-}
-
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
   assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
          "Reference should not be NULL here as such are never pushed to the task queue.");
src/share/vm/gc_implementation/g1/g1RemSet.cpp
浏览文件 @
6b2e7283
...
@@ -338,12 +338,8 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
...
@@ -338,12 +338,8 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
// are just discarded (there's no need to update the RSets of regions
// are just discarded (there's no need to update the RSets of regions
// that were in the collection set - after the pause these regions
// that were in the collection set - after the pause these regions
// are wholly 'free' of live objects. In the event of an evacuation
// are wholly 'free' of live objects. In the event of an evacuation
// failure the cards/buffers in this queue set are:
// failure the cards/buffers in this queue set are passed to the
// * passed to the DirtyCardQueueSet that is used to manage deferred
// DirtyCardQueueSet that is used to manage RSet updates
// RSet updates, or
// * scanned for references that point into the collection set
// and the RSet of the corresponding region in the collection set
// is updated immediately.
DirtyCardQueue
into_cset_dcq
(
&
_g1
->
into_cset_dirty_card_queue_set
());
DirtyCardQueue
into_cset_dcq
(
&
_g1
->
into_cset_dirty_card_queue_set
());
assert
((
ParallelGCThreads
>
0
)
||
worker_i
==
0
,
"invariant"
);
assert
((
ParallelGCThreads
>
0
)
||
worker_i
==
0
,
"invariant"
);
...
@@ -372,7 +368,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
...
@@ -372,7 +368,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
void
G1RemSet
::
prepare_for_oops_into_collection_set_do
()
{
void
G1RemSet
::
prepare_for_oops_into_collection_set_do
()
{
cleanupHRRS
();
cleanupHRRS
();
ConcurrentG1Refine
*
cg1r
=
_g1
->
concurrent_g1_refine
();
_g1
->
set_refine_cte_cl_concurrency
(
false
);
_g1
->
set_refine_cte_cl_concurrency
(
false
);
DirtyCardQueueSet
&
dcqs
=
JavaThread
::
dirty_card_queue_set
();
DirtyCardQueueSet
&
dcqs
=
JavaThread
::
dirty_card_queue_set
();
dcqs
.
concatenate_logs
();
dcqs
.
concatenate_logs
();
...
@@ -385,66 +380,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
...
@@ -385,66 +380,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
_total_cards_scanned
=
0
;
_total_cards_scanned
=
0
;
}
}
-// This closure, applied to a DirtyCardQueueSet, is used to immediately
-// update the RSets for the regions in the CSet. For each card it iterates
-// through the oops which coincide with that card. It scans the reference
-// fields in each oop; when it finds an oop that points into the collection
-// set, the RSet for the region containing the referenced object is updated.
-class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
-  G1CollectedHeap* _g1;
-  CardTableModRefBS* _ct_bs;
-public:
-  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
-                                          CardTableModRefBS* bs):
-    _g1(g1), _ct_bs(bs) { }
-
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    // Construct the region representing the card.
-    HeapWord* start = _ct_bs->addr_for(card_ptr);
-    // And find the region containing it.
-    HeapRegion* r = _g1->heap_region_containing(start);
-
-    // Scan oops in the card looking for references into the collection set
-    // Don't use addr_for(card_ptr + 1) which can ask for
-    // a card beyond the heap.  This is not safe without a perm
-    // gen.
-    HeapWord* end = start + CardTableModRefBS::card_size_in_words;
-    MemRegion scanRegion(start, end);
-
-    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
-    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
-    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
-
-    // We can pass false as the "filter_young" parameter here as:
-    // * we should be in a STW pause,
-    // * the DCQS to which this closure is applied is used to hold
-    //   references that point into the collection set from the prior
-    //   RSet updating,
-    // * the post-write barrier shouldn't be logging updates to young
-    //   regions (but there is a situation where this can happen - see
-    //   the comment in G1RemSet::refine_card() below -
-    //   that should not be applicable here), and
-    // * during actual RSet updating, the filtering of cards in young
-    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
-    //   employed.
-    // As a result, when this closure is applied to "refs into cset"
-    // DCQS, we shouldn't see any cards in young regions.
-    update_rs_cl.set_region(r);
-    HeapWord* stop_point =
-      r->oops_on_card_seq_iterate_careful(scanRegion,
-                                          &filter_then_update_rs_cset_oop_cl,
-                                          false /* filter_young */,
-                                          NULL  /* card_ptr */);
-
-    // Since this is performed in the event of an evacuation failure, we
-    // shouldn't see a non-null stop point
-    assert(stop_point == NULL, "saw an unallocated region");
-    return true;
-  }
-};
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
...
@@ -465,25 +400,10 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {

   double restore_remembered_set_start = os::elapsedTime();

   // Restore remembered sets for the regions pointing into the collection set.
-  if (G1DeferredRSUpdate) {
-    // If deferred RS updates are enabled then we just need to transfer
-    // the completed buffers from (a) the DirtyCardQueueSet used to hold
-    // cards that contain references that point into the collection set
-    // to (b) the DCQS used to hold the deferred RS updates
-    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
-  } else {
-    CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
-    UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
-
-    int n_completed_buffers = 0;
-    while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
-                                                            0, 0, true)) {
-      n_completed_buffers++;
-    }
-    assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
-  }
+  // We just need to transfer the completed buffers from the DirtyCardQueueSet
+  // used to hold cards that contain references that point into the collection set
+  // to the DCQS used to hold the deferred RS updates.
+  _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);

   _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
 }
...
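For readers following the G1 change above: deferred remembered-set updating is now the only mode, so on an evacuation failure the collected "into collection set" card buffers are simply handed over to the RSet-update queue set instead of ever being scanned in place (which is why the immediate-update closures are deleted). Below is a minimal standalone sketch of that hand-off; `DirtyCardQueueSetSketch`, `CardBuffer`, and the variable names are illustrative stand-ins, not the HotSpot classes.

```cpp
#include <cstdio>
#include <deque>
#include <vector>

// Toy stand-ins for the two queue sets involved: one collects cards whose
// references point into the collection set, the other holds deferred
// remembered-set update work that refinement threads drain later.
typedef std::vector<int> CardBuffer;          // a "completed buffer" of card indices

struct DirtyCardQueueSetSketch {
  std::deque<CardBuffer> completed;           // completed buffers awaiting processing

  void merge_bufferlists(DirtyCardQueueSetSketch* from) {
    // Transfer ownership of every completed buffer; nothing is scanned here,
    // which is exactly why the immediate-update closure becomes unnecessary.
    while (!from->completed.empty()) {
      completed.push_back(from->completed.front());
      from->completed.pop_front();
    }
  }
};

int main() {
  DirtyCardQueueSetSketch into_cset_dcqs;     // filled during an evacuation failure
  DirtyCardQueueSetSketch update_rs_dcqs;     // the deferred RSet-update queue set

  into_cset_dcqs.completed.push_back(CardBuffer{1, 2, 3});
  into_cset_dcqs.completed.push_back(CardBuffer{42});

  // With the G1DeferredRSUpdate flag gone, this is the only path left:
  update_rs_dcqs.merge_bufferlists(&into_cset_dcqs);

  std::printf("deferred buffers now queued: %zu\n", update_rs_dcqs.completed.size());
  return 0;
}
```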
src/share/vm/gc_implementation/g1/g1RemSet.hpp
...
@@ -193,18 +193,4 @@ public:
   bool apply_to_weak_ref_discovered_field() { return true; }
 };
-
-class UpdateRSetImmediate: public OopsInHeapRegionClosure {
-private:
-  G1RemSet* _g1_rem_set;
-
-  template <class T> void do_oop_work(T* p);
-public:
-  UpdateRSetImmediate(G1RemSet* rs) :
-    _g1_rem_set(rs) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
...
@@ -79,13 +79,4 @@ inline void UpdateRSOopClosure::do_oop_work(T* p) {
   _rs->par_write_ref(_from, p, _worker_i);
 }
-
-template <class T>
-inline void UpdateRSetImmediate::do_oop_work(T* p) {
-  assert(_from->is_in_reserved(p), "paranoia");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
-    _g1_rem_set->par_write_ref(_from, p, 0);
-  }
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
src/share/vm/gc_implementation/g1/g1_globals.hpp
...
@@ -108,9 +108,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
                                                                             \
-  develop(bool, G1DeferredRSUpdate, true,                                   \
-          "If true, use deferred RS updates")                               \
-                                                                            \
   develop(bool, G1RSLogCheckCardTable, false,                               \
           "If true, verify that no dirty cards remain after RS log "        \
           "processing.")                                                    \
...
src/share/vm/interpreter/oopMapCache.cpp
...
@@ -180,7 +180,7 @@ InterpreterOopMap::~InterpreterOopMap() {
   }
 }

-bool InterpreterOopMap::is_empty() {
+bool InterpreterOopMap::is_empty() const {
   bool result = _method == NULL;
   assert(_method != NULL || (_bci == 0 &&
     (_mask_size == 0 || _mask_size == USHRT_MAX) &&
...
@@ -196,7 +196,7 @@ void InterpreterOopMap::initialize() {
   for (int i = 0; i < N; i++) _bit_mask[i] = 0;
 }

-void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
+void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
   int n = number_of_entries();
   int word_index = 0;
   uintptr_t value = 0;
...
@@ -238,7 +238,7 @@ void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* v
 #endif

-void InterpreterOopMap::print() {
+void InterpreterOopMap::print() const {
   int n = number_of_entries();
   tty->print("oop map for ");
   method()->print_value();
...
@@ -469,7 +469,7 @@ void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
   }
 }

-inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
+inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const {
   // We use method->code_size() rather than method->identity_hash() below since
   // the mark may not be present if a pointer to the method is already reversed.
   return ((unsigned int) bci)
...
@@ -522,7 +522,7 @@ void OopMapCache::flush_obsolete_entries() {
 void OopMapCache::lookup(methodHandle method,
                          int bci,
-                         InterpreterOopMap* entry_for) {
+                         InterpreterOopMap* entry_for) const {
   MutexLocker x(&_mut);

   OopMapCacheEntry* entry = NULL;
...
src/share/vm/interpreter/oopMapCache.hpp
...
@@ -101,32 +101,31 @@ class InterpreterOopMap: ResourceObj {
   // access methods
   Method* method() const                    { return _method; }
   void set_method(Method* v)                { _method = v; }
   int bci() const                           { return _bci; }
   void set_bci(int v)                       { _bci = v; }
   int mask_size() const                     { return _mask_size; }
   void set_mask_size(int v)                 { _mask_size = v; }
-  int number_of_entries() const             { return mask_size() / bits_per_entry; }
   // Test bit mask size and return either the in-line bit mask or allocated
   // bit mask.
-  uintptr_t* bit_mask()                     { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
+  uintptr_t* bit_mask() const               { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }

   // return the word size of_bit_mask.  mask_size() <= 4 * MAX_USHORT
-  size_t mask_word_size() {
+  size_t mask_word_size() const {
     return (mask_size() + BitsPerWord - 1) / BitsPerWord;
   }

-  uintptr_t entry_at(int offset)            { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
+  uintptr_t entry_at(int offset) const      { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }

   void set_expression_stack_size(int sz)    { _expression_stack_size = sz; }

 #ifdef ENABLE_ZAP_DEAD_LOCALS
-  bool is_dead(int offset)                  { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
+  bool is_dead(int offset) const            { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
 #endif

   // Lookup
-  bool match(methodHandle method, int bci)  { return _method == method() && _bci == bci; }
-  bool is_empty();
+  bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; }
+  bool is_empty() const;

   // Initialization
   void initialize();
...
@@ -141,12 +140,13 @@ class InterpreterOopMap: ResourceObj {
   // in-line), allocate the space from a Resource area.
   void resource_copy(OopMapCacheEntry* from);

-  void iterate_oop(OffsetClosure* oop_closure);
-  void print();
+  void iterate_oop(OffsetClosure* oop_closure) const;
+  void print() const;

-  bool is_oop(int offset)                   { return (entry_at(offset) & (1 << oop_bit_number)) != 0; }
+  int number_of_entries() const             { return mask_size() / bits_per_entry; }
+  bool is_oop(int offset) const             { return (entry_at(offset) & (1 << oop_bit_number)) != 0; }

-  int expression_stack_size()               { return _expression_stack_size; }
+  int expression_stack_size() const         { return _expression_stack_size; }

 #ifdef ENABLE_ZAP_DEAD_LOCALS
   void iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure);
...
@@ -161,10 +161,10 @@ class OopMapCache : public CHeapObj<mtClass> {
   OopMapCacheEntry* _array;

-  unsigned int hash_value_for(methodHandle method, int bci);
+  unsigned int hash_value_for(methodHandle method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;

-  Mutex _mut;
+  mutable Mutex _mut;

   void flush();
...
@@ -177,7 +177,7 @@ class OopMapCache : public CHeapObj<mtClass> {
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(methodHandle method, int bci, InterpreterOopMap* entry);
+  void lookup(methodHandle method, int bci, InterpreterOopMap* entry) const;

   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry);
...
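The oopMapCache changes above are a const-correctness pass: the read-only queries (is_empty, iterate_oop, print, hash_value_for, lookup) gain a const qualifier, and the cache's internal lock becomes `mutable` so a logically-const lookup can still acquire it. A small self-contained sketch of that same pattern is shown below, using `std::mutex` instead of the VM's Mutex; the class and member names are invented for the example.

```cpp
#include <map>
#include <mutex>
#include <string>

// A logically read-only cache: lookup() does not change observable state,
// so it is declared const; the lock it needs is marked mutable.
class OopMapCacheSketch {
 public:
  bool lookup(const std::string& key, int* out) const {
    std::lock_guard<std::mutex> guard(_mut);   // allowed because _mut is mutable
    std::map<std::string, int>::const_iterator it = _entries.find(key);
    if (it == _entries.end()) {
      return false;
    }
    *out = it->second;
    return true;
  }

  void insert(const std::string& key, int value) {
    std::lock_guard<std::mutex> guard(_mut);
    _entries[key] = value;
  }

 private:
  mutable std::mutex _mut;                     // counterpart of 'mutable Mutex _mut'
  std::map<std::string, int> _entries;
};

int main() {
  OopMapCacheSketch cache;
  cache.insert("method@bci=7", 3);

  const OopMapCacheSketch& ro = cache;         // a const view still supports lookup()
  int mask = 0;
  return ro.lookup("method@bci=7", &mask) && mask == 3 ? 0 : 1;
}
```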
src/share/vm/oops/arrayKlass.cpp
...
@@ -100,7 +100,7 @@ void ArrayKlass::complete_create_array_klass(ArrayKlass* k, KlassHandle super_kl
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, Handle(NULL), Handle(NULL), CHECK);
+  java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), Handle(NULL), CHECK);
 }

 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
...
src/share/vm/runtime/arguments.cpp
...
@@ -1646,6 +1646,12 @@ void Arguments::set_g1_gc_flags() {
                              Abstract_VM_Version::parallel_worker_threads());
   }

+#if INCLUDE_ALL_GCS
+  if (G1ConcRefinementThreads == 0) {
+    FLAG_SET_DEFAULT(G1ConcRefinementThreads, ParallelGCThreads);
+  }
+#endif
+
   // MarkStackSize will be set (if it hasn't been set by the user)
   // when concurrent marking is initialized.
   // Its value will be based upon the number of parallel marking threads.
...
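The arguments.cpp hunk defaults G1ConcRefinementThreads to ParallelGCThreads when the user left it at 0, which is what the new TestG1ConcRefinementThreads test later in this commit verifies from -XX:+PrintFlagsFinal output. A hedged sketch of that defaulting rule follows, with plain integers standing in for the VM's flag machinery and FLAG_SET_DEFAULT.

```cpp
#include <cstdio>

// Stand-ins for the two flags; in the VM these are globals set via FLAG_SET_DEFAULT.
struct G1FlagsSketch {
  unsigned parallel_gc_threads;
  unsigned conc_refinement_threads;   // 0 means "not chosen explicitly"
};

// Mirror of the defaulting rule: only fill in a value when the user left it at 0.
static void set_g1_refinement_default(G1FlagsSketch* flags) {
  if (flags->conc_refinement_threads == 0) {
    flags->conc_refinement_threads = flags->parallel_gc_threads;
  }
}

int main() {
  G1FlagsSketch defaulted = {8, 0};      // user did not set the refinement thread count
  G1FlagsSketch user_chosen = {8, 11};   // user passed -XX:G1ConcRefinementThreads=11

  set_g1_refinement_default(&defaulted);
  set_g1_refinement_default(&user_chosen);

  std::printf("defaulted=%u user_chosen=%u\n",
              defaulted.conc_refinement_threads,     // 8, follows ParallelGCThreads
              user_chosen.conc_refinement_threads);  // 11, untouched
  return 0;
}
```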
src/share/vm/runtime/deoptimization.hpp
...
@@ -60,8 +60,8 @@ class Deoptimization : AllStatic {
     Reason_predicate,             // compiler generated predicate failed
     Reason_loop_limit_check,      // compiler generated loop limits check failed
     Reason_speculate_class_check, // saw unexpected object class from type speculation
-    Reason_unstable_if,           // a branch predicted always false was taken
     Reason_rtm_state_change,      // rtm state change detected
+    Reason_unstable_if,           // a branch predicted always false was taken
     Reason_LIMIT,
     // Note: Keep this enum in sync. with _trap_reason_name.
     Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc
...
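The deoptimization.hpp hunk only swaps the relative order of two DeoptReason entries, but the comment it preserves ("Keep this enum in sync. with _trap_reason_name") is the point: the reason enum indexes a parallel string table, so both lists must enumerate entries in the same order. A generic illustration of that pattern, with invented names and a compile-time size check, is sketched below.

```cpp
#include <cstdio>

// A reason enum and a parallel name table that must stay in the same order.
enum ReasonSketch {
  Reason_none = 0,
  Reason_rtm_state_change,
  Reason_unstable_if,
  Reason_LIMIT
};

static const char* const reason_name[] = {
  "none",
  "rtm_state_change",
  "unstable_if"
};

// If an entry is added to one list but not the other, this fails to compile;
// a mismatch in order would silently report the wrong name for a trap.
static_assert(sizeof(reason_name) / sizeof(reason_name[0]) == Reason_LIMIT,
              "reason_name must match ReasonSketch");

int main() {
  std::printf("%d -> %s\n", Reason_unstable_if, reason_name[Reason_unstable_if]);
  return 0;
}
```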
src/share/vm/runtime/vframe.cpp
...
@@ -260,65 +260,156 @@ Method* interpretedVFrame::method() const {
   return fr().interpreter_frame_method();
 }

-StackValueCollection* interpretedVFrame::locals() const {
-  int length = method()->max_locals();
-
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
-  }
-
-  StackValueCollection* result = new StackValueCollection(length);
-
-  // Get oopmap describing oops and int for current bci
-  InterpreterOopMap oop_mask;
-  if (TraceDeoptimization && Verbose) {
-    methodHandle m_h(thread(), method());
-    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-  } else {
-    method()->mask_for(bci(), &oop_mask);
-  }
-  // handle locals
-  for (int i = 0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = locals_addr_at(i);
-
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
-  }
-  return result;
-}
+static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask,
+                                                   int index,
+                                                   const intptr_t* const addr) {
+
+  assert(index >= 0 &&
+         index < oop_mask.number_of_entries(), "invariant");
+
+  // categorize using oop_mask
+  if (oop_mask.is_oop(index)) {
+    // reference (oop) "r"
+    Handle h(addr != NULL ? (*(oop*)addr) : (oop)NULL);
+    return new StackValue(h);
+  }
+  // value (integer) "v"
+  return new StackValue(addr != NULL ? *addr : 0);
+}
+
+static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) {
+  assert(addr != NULL, "invariant");
+
+  // Ensure to be 'inside' the expresion stack (i.e., addr >= sp for Intel).
+  // In case of exceptions, the expression stack is invalid and the sp
+  // will be reset to express this condition.
+  if (frame::interpreter_frame_expression_stack_direction() > 0) {
+    return addr <= fr.interpreter_frame_tos_address();
+  }
+
+  return addr >= fr.interpreter_frame_tos_address();
+}
+
+static void stack_locals(StackValueCollection* result,
+                         int length,
+                         const InterpreterOopMap& oop_mask,
+                         const frame& fr) {
+
+  assert(result != NULL, "invariant");
+
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* const addr = fr.interpreter_frame_local_at(i);
+    assert(addr != NULL, "invariant");
+    assert(addr >= fr.sp(), "must be inside the frame");
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+static void stack_expressions(StackValueCollection* result,
+                              int length,
+                              int max_locals,
+                              const InterpreterOopMap& oop_mask,
+                              const frame& fr) {
+
+  assert(result != NULL, "invariant");
+
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* addr = fr.interpreter_frame_expression_stack_at(i);
+    assert(addr != NULL, "invariant");
+    if (!is_in_expression_stack(fr, addr)) {
+      // Need to ensure no bogus escapes.
+      addr = NULL;
+    }
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask,
+                                                           i + max_locals,
+                                                           addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+StackValueCollection* interpretedVFrame::locals() const {
+  return stack_data(false);
+}
+
+StackValueCollection* interpretedVFrame::expressions() const {
+  return stack_data(true);
+}
+
+/*
+ * Worker routine for fetching references and/or values
+ * for a particular bci in the interpretedVFrame.
+ *
+ * Returns data for either "locals" or "expressions",
+ * using bci relative oop_map (oop_mask) information.
+ *
+ * @param expressions  bool switch controlling what data to return
+ *                     (false == locals / true == expressions)
+ *
+ */
+StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
+
+  InterpreterOopMap oop_mask;
+  // oopmap for current bci
+  if (TraceDeoptimization && Verbose) {
+    methodHandle m_h(Thread::current(), method());
+    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
+  } else {
+    method()->mask_for(bci(), &oop_mask);
+  }
+
+  const int mask_len = oop_mask.number_of_entries();
+
+  // If the method is native, method()->max_locals() is not telling the truth.
+  // For our purposes, max locals instead equals the size of parameters.
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();
+
+  assert(mask_len >= max_locals, "invariant");
+
+  const int length = expressions ? mask_len - max_locals : max_locals;
+  assert(length >= 0, "invariant");
+
+  StackValueCollection* const result = new StackValueCollection(length);
+
+  if (0 == length) {
+    return result;
+  }
+
+  if (expressions) {
+    stack_expressions(result, length, max_locals, oop_mask, fr());
+  } else {
+    stack_locals(result, length, oop_mask, fr());
+  }
+
+  assert(length == result->size(), "invariant");
+
+  return result;
+}

 void interpretedVFrame::set_locals(StackValueCollection* values) const {
   if (values == NULL || values->size() == 0) return;

-  int length = method()->max_locals();
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
-  }
+  // If the method is native, max_locals is not telling the truth.
+  // maxlocals then equals the size of parameters
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();

-  assert(length == values->size(), "Mismatch between actual stack format and supplied data");
+  assert(max_locals == values->size(), "Mismatch between actual stack format and supplied data");

   // handle locals
-  for (int i = 0; i < length; i++) {
+  for (int i = 0; i < max_locals; i++) {
     // Find stack location
     intptr_t *addr = locals_addr_at(i);

     // Depending on oop/int put it in the right package
-    StackValue *sv = values->at(i);
+    const StackValue* const sv = values->at(i);
     assert(sv != NULL, "sanity check");
     if (sv->type() == T_OBJECT) {
       *(oop *) addr = (sv->get_obj())();
...
@@ -328,46 +419,6 @@ void interpretedVFrame::set_locals(StackValueCollection* values) const {
   }
 }

-StackValueCollection* interpretedVFrame::expressions() const {
-  int length = fr().interpreter_frame_expression_stack_size();
-  if (method()->is_native()) {
-    // If the method is native, there is no expression stack
-    length = 0;
-  }
-
-  int nof_locals = method()->max_locals();
-  StackValueCollection* result = new StackValueCollection(length);
-
-  InterpreterOopMap oop_mask;
-  // Get oopmap describing oops and int for current bci
-  if (TraceDeoptimization && Verbose) {
-    methodHandle m_h(method());
-    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-  } else {
-    method()->mask_for(bci(), &oop_mask);
-  }
-  // handle expressions
-  for (int i = 0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
-
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i + nof_locals)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
-  }
-  return result;
-}
-
 // ------------- cChunk --------------

 entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
...
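The vframe.cpp rewrite above replaces two near-duplicate loops (locals() and expressions()) with a single stack_data(bool) worker plus small static helpers, so the oop-map-driven "reference vs. plain value" decision lives in exactly one place. The standalone sketch below mirrors that shape with toy types; none of these names are the HotSpot ones.

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Toy model: a frame holds locals and an expression stack of raw slots,
// and an "oop mask" says which slot indices hold references.
struct FrameSketch {
  std::vector<long> locals;
  std::vector<long> expressions;
};

struct OopMaskSketch {
  std::vector<bool> is_oop;                        // indexed: locals first, then expressions
};

struct SlotValue {
  bool is_reference;
  long raw;
};

// Single place that categorizes a slot, mirroring create_stack_value_from_oop_map.
static SlotValue categorize(const OopMaskSketch& mask, size_t index, long raw) {
  assert(index < mask.is_oop.size());
  SlotValue v = { mask.is_oop[index], raw };
  return v;
}

// One worker serves both queries, mirroring stack_data(bool expressions).
static std::vector<SlotValue> stack_data(const FrameSketch& fr,
                                         const OopMaskSketch& mask,
                                         bool expressions) {
  const size_t max_locals = fr.locals.size();
  const std::vector<long>& src = expressions ? fr.expressions : fr.locals;
  std::vector<SlotValue> result;
  result.reserve(src.size());
  for (size_t i = 0; i < src.size(); ++i) {
    const size_t mask_index = expressions ? i + max_locals : i;
    result.push_back(categorize(mask, mask_index, src[i]));
  }
  return result;
}

int main() {
  FrameSketch fr = { {11, 22}, {33} };
  OopMaskSketch mask = { {true, false, true} };    // local 0 and expression 0 are references

  std::vector<SlotValue> locals = stack_data(fr, mask, false);
  std::vector<SlotValue> exprs  = stack_data(fr, mask, true);
  std::printf("local0 ref=%d expr0 ref=%d\n",
              (int)locals[0].is_reference, (int)exprs[0].is_reference);
  return 0;
}
```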
src/share/vm/runtime/vframe.hpp
...
@@ -186,7 +186,7 @@ class interpretedVFrame: public javaVFrame {
  private:
   static const int bcp_offset;
   intptr_t* locals_addr_at(int offset) const;
+  StackValueCollection* stack_data(bool expressions) const;

   // returns where the parameters starts relative to the frame pointer
   int start_of_parameters() const;
...
src/share/vm/services/mallocTracker.cpp
...
@@ -140,11 +140,6 @@ void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flag
     return NULL;
   }

-  // Check malloc size, size has to <= MAX_MALLOC_SIZE. This is only possible on 32-bit
-  // systems, when malloc size >= 1GB, but is is safe to assume it won't happen.
-  if (size > MAX_MALLOC_SIZE) {
-    fatal("Should not use malloc for big memory block, use virtual memory instead");
-  }
-
   // Uses placement global new operator to initialize malloc header
   switch (level) {
     case NMT_off:
...
@@ -154,10 +149,12 @@ void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flag
       break;
     }
     case NMT_summary: {
+      assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
       header = ::new (malloc_base) MallocHeader(size, flags);
       break;
     }
     case NMT_detail: {
+      assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
       header = ::new (malloc_base) MallocHeader(size, flags, stack);
       break;
     }
...
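In the mallocTracker.cpp hunk the unconditional fatal() on oversized requests is dropped; only the tracking modes that actually encode the request size into a MallocHeader assert the MAX_MALLOC_SIZE limit, while tracking-off allocations pass through untouched (which is what the renamed UnsafeMallocLimit2 test below exercises). A small sketch of that "check only where the bits are packed" idea follows, with an invented 1 GB limit standing in for MAX_MALLOC_SIZE.

```cpp
#include <cassert>
#include <cstdio>
#include <cstdlib>

// Invented stand-in for MAX_MALLOC_SIZE: pretend a tracking header only has room
// to record sizes up to 1 GB (roughly the 32-bit NMT situation).
static const size_t kMaxRecordableSize = (size_t)1 << 30;

enum TrackingLevel { kOff, kSummary, kDetail };

// Only the levels that actually record the size need the limit; with tracking
// off the allocation is simply passed through, mirroring the removed fatal().
static void* record_malloc_sketch(size_t size, TrackingLevel level) {
  switch (level) {
    case kOff:
      return std::malloc(size);
    case kSummary:
    case kDetail:
      assert(size <= kMaxRecordableSize && "size overrun for the tracking header");
      return std::malloc(size);
  }
  return NULL;
}

int main() {
  void* p = record_malloc_sketch(64, kSummary);
  std::printf("allocated %p\n", p);
  std::free(p);
  return 0;
}
```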
test/gc/arguments/TestG1ConcRefinementThreads.java
0 → 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestG1ConcRefinementThreads
* @key gc
* @bug 8047976
* @summary Tests argument processing for G1ConcRefinementThreads
* @library /testlibrary
*/
import com.oracle.java.testlibrary.*;
import java.util.*;
import java.util.regex.*;

public class TestG1ConcRefinementThreads {

  static final int AUTO_SELECT_THREADS_COUNT = 0;
  static final int PASSED_THREADS_COUNT = 11;

  public static void main(String args[]) throws Exception {
    // default case
    runG1ConcRefinementThreadsTest(
        new String[]{}, // automatically selected
        AUTO_SELECT_THREADS_COUNT /* use default setting */);

    // zero setting case
    runG1ConcRefinementThreadsTest(
        new String[]{"-XX:G1ConcRefinementThreads=0"}, // automatically selected
        AUTO_SELECT_THREADS_COUNT /* set to zero */);

    // non-zero setting case
    runG1ConcRefinementThreadsTest(
        new String[]{"-XX:G1ConcRefinementThreads=" + Integer.toString(PASSED_THREADS_COUNT)},
        PASSED_THREADS_COUNT);
  }

  private static void runG1ConcRefinementThreadsTest(String[] passedOpts,
          int expectedValue) throws Exception {
    List<String> vmOpts = new ArrayList<>();
    if (passedOpts.length > 0) {
      Collections.addAll(vmOpts, passedOpts);
    }
    Collections.addAll(vmOpts, "-XX:+UseG1GC", "-XX:+PrintFlagsFinal", "-version");

    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
    OutputAnalyzer output = new OutputAnalyzer(pb.start());

    output.shouldHaveExitValue(0);
    String stdout = output.getStdout();
    checkG1ConcRefinementThreadsConsistency(stdout, expectedValue);
  }

  private static void checkG1ConcRefinementThreadsConsistency(String output, int expectedValue) {
    int actualValue = getIntValue("G1ConcRefinementThreads", output);

    if (expectedValue == 0) {
      // If expectedValue is automatically selected, set it same as ParallelGCThreads.
      expectedValue = getIntValue("ParallelGCThreads", output);
    }

    if (expectedValue != actualValue) {
      throw new RuntimeException(
            "Actual G1ConcRefinementThreads(" + Integer.toString(actualValue)
            + ") is not equal to expected value(" + Integer.toString(expectedValue) + ")");
    }
  }

  public static int getIntValue(String flag, String where) {
    Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where);
    if (!m.find()) {
      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
    }
    String match = m.group();
    return Integer.parseInt(match.substring(match.lastIndexOf(" ") + 1, match.length()));
  }
}
test/runtime/LoadClass/ShowClassLoader.java
0 → 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @key regression
* @bug 8058927
* @summary Make sure array class has the right class loader
* @run main ShowClassLoader
*/
public class ShowClassLoader {

  public static void main(String[] args) {
    Object[] oa = new Object[0];
    ShowClassLoader[] sa = new ShowClassLoader[0];

    System.out.println("Classloader for Object[] is " + oa.getClass().getClassLoader());
    System.out.println("Classloader for SCL[] is " + sa.getClass().getClassLoader());

    if (sa.getClass().getClassLoader() == null) {
      throw new RuntimeException("Wrong class loader");
    }
  }
}
test/gc/g1/TestDeferredRSUpdate.java → test/runtime/NMT/UnsafeMallocLimit2.java
...
@@ -22,58 +22,33 @@
  */

 /*
- * @test TestDeferredRSUpdate
- * @bug 8040977 8052170
- * @summary Ensure that running with -XX:-G1DeferredRSUpdate does not crash the VM
- * @key gc
+ * @test
+ * @bug 8058818
  * @library /testlibrary
+ * @build UnsafeMallocLimit2
+ * @run main/othervm -Xmx32m -XX:NativeMemoryTracking=off UnsafeMallocLimit2
  */

-import com.oracle.java.testlibrary.ProcessTools;
-import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.*;
+import sun.misc.Unsafe;

-public class TestDeferredRSUpdate {
-  public static void main(String[] args) throws Exception {
-    GCTest.main(args);
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
-                                                              "-Xmx10M",
-                                                              "-XX:+PrintGCDetails",
-                                                              // G1DeferredRSUpdate is a develop option, but we cannot limit execution of this test to only debug VMs.
-                                                              "-XX:+IgnoreUnrecognizedVMOptions",
-                                                              "-XX:-G1DeferredRSUpdate",
-                                                              GCTest.class.getName());
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldHaveExitValue(0);
-  }
-
-  static class GCTest {
-    private static Object[] garbage = new Object[32];
-
-    public static void main(String [] args) {
-      System.out.println("Creating garbage");
-      // Create 128MB of garbage. This should result in at least one minor GC, with
-      // some objects copied to old gen. As references from old to young are installed,
-      // the crash due to the use before initialize occurs.
-      Object prev = null;
-      Object prevPrev = null;
-      for (int i = 0; i < 1024; i++) {
-        Object[] next = new Object[32 * 1024];
-        next[0] = prev;
-        next[1] = prevPrev;
-
-        Object[] cur = (Object[]) garbage[i % garbage.length];
-        if (cur != null) {
-          cur[0] = null;
-          cur[1] = null;
-        }
-        garbage[i % garbage.length] = next;
-
-        prevPrev = prev;
-        prev = next;
-      }
-      System.out.println("Done");
-    }
-  }
-}
+public class UnsafeMallocLimit2 {
+
+    public static void main(String args[]) throws Exception {
+        if (Platform.is32bit()) {
+            Unsafe unsafe = Utils.getUnsafe();
+            try {
+                // Allocate greater than MALLOC_MAX and likely won't fail to allocate,
+                // so it hits the NMT code that asserted.
+                // Test that this doesn't cause an assertion with NMT off.
+                // The option above overrides if all the tests are run with NMT on.
+                unsafe.allocateMemory(0x40000000);
+                System.out.println("Allocation succeeded");
+            } catch (OutOfMemoryError e) {
+                System.out.println("Allocation failed");
+            }
+        } else {
+            System.out.println("Test only valid on 32-bit platforms");
+        }
+    }
+}