openanolis / dragonwell8_hotspot
Commit f4d4ba98, authored on Oct 14, 2014 by asaha
Merge commit. Parents: 6b2e7283, 4a841854.
Showing 12 changed files with 183 additions and 66 deletions (+183, -66)
Changed files:
  .hgtags                                                                           +1  -0
  make/hotspot_version                                                              +1  -1
  src/share/vm/c1/c1_LIRGenerator.cpp                                               +10 -10
  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp   +51 -14
  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp   +14 -0
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                             +1  -0
  src/share/vm/gc_implementation/g1/vm_operations_g1.cpp                            +3  -2
  src/share/vm/gc_interface/gcCause.cpp                                             +2  -1
  src/share/vm/gc_interface/gcCause.hpp                                             +2  -1
  test/compiler/osr/TestRangeCheck.java                                             +22 -29
  test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java                      +42 -5
  test/gc/class_unloading/TestG1ClassUnloadingHWM.java                              +34 -3
.hgtags

@@ -524,6 +524,7 @@ b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
 905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08
 d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
 7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
+e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14
 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
make/hotspot_version

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
src/share/vm/c1/c1_LIRGenerator.cpp

@@ -2066,14 +2066,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }

@@ -2088,14 +2088,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }

@@ -2176,12 +2176,12 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }

@@ -2191,7 +2191,7 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
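The change above makes C1 consult the LIR operand it actually generated (base_op->type(), index_op->type(), index_op->is_constant()) rather than the high-level Value's type tag when deciding whether a conversion or move is needed in do_UnsafeGetRaw()/do_UnsafePutRaw(). For orientation only, the snippet below is a hypothetical, stand-alone example (not part of this commit) of the address-based sun.misc.Unsafe accesses that these intrinsics roughly correspond to; the reflective theUnsafe lookup is just the usual way to obtain the instance outside the boot class path.

import java.lang.reflect.Field;
import sun.misc.Unsafe;

// Hypothetical illustration, not from this commit: raw off-heap reads and writes
// addressed by a long base plus an int index, roughly the access shape that
// LIRGenerator::do_UnsafeGetRaw()/do_UnsafePutRaw() compile for C1.
public class UnsafeRawAccessExample {
    public static void main(String[] args) throws Exception {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        long base = unsafe.allocateMemory(64);        // raw off-heap block
        try {
            for (int i = 0; i < 64; i++) {
                unsafe.putByte(base + i, (byte) i);   // raw put: long base + int index
            }
            long sum = 0;
            for (int i = 0; i < 64; i++) {
                sum += unsafe.getByte(base + i);      // raw get of the same addresses
            }
            System.out.println("sum = " + sum);
        } finally {
            unsafe.freeMemory(base);
        }
    }
}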
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -2733,10 +2733,12 @@ void CFLS_LAB::retire(int tid) {
   }
 }
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize, "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl".  "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,

@@ -2817,11 +2819,15 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
                       Mutex::_no_safepoint_check_flag);
       ssize_t births = _indexedFreeList[word_sz].split_births() + num;
       _indexedFreeList[word_sz].set_split_births(births);
-      return;
+      return true;
     }
   }
-  // Otherwise, we'll split a block from the dictionary.
+  return found;
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;

@@ -2832,16 +2838,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()), FreeBlockDictionary<FreeChunk>::atLeast);
     if (fc != NULL) {
-      _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-      dictionary()->dict_census_update(fc->size(), true /*split*/, false /*birth*/);
       break;
     } else {
       n--;
     }
   }
-  if (fc == NULL) return;
+  if (fc == NULL) return NULL;
   // Otherwise, split up that block.
   assert((ssize_t)n >= 1, "Control point invariant");
   assert(fc->is_free(), "Error: should be a free block");

@@ -2863,10 +2865,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   // dictionary and return, leaving "fl" empty.
   if (n == 0) {
     returnChunkToDictionary(fc);
     assert(fl->count() == 0, "We never allocated any blocks");
-    return;
+    return NULL;
   }
+  _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+  dictionary()->dict_census_update(fc->size(), true /*split*/, false /*birth*/);
   // First return the remainder, if any.
   // Note that we hold the lock until we decide if we're going to give
   // back the remainder to the dictionary, since a concurrent allocation

@@ -2899,7 +2905,24 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by " SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+            fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we

@@ -2947,6 +2970,20 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize, "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

@@ -172,6 +172,20 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
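Taken together with the .cpp hunks above, the refactoring splits the old monolithic par_get_chunk_of_blocks() into par_get_chunk_of_blocks_IFL(), get_n_way_chunk_to_split() and par_get_chunk_of_blocks_dictionary(). The sketch below is a hypothetical Java rendering of that call structure only (ChunkAllocatorSketch, getChunkOfBlocksIFL and the Deque-based stand-ins are invented names); the real CMS code additionally handles locking, in-place chunk splitting and split-birth statistics.

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical sketch, not HotSpot code: try the size-indexed free lists first,
// then fall back to splitting one large chunk from the dictionary.
public class ChunkAllocatorSketch {
    static final int INDEX_SET_SIZE = 257;                 // stand-in for IndexSetSize
    final Deque<long[]>[] indexedFreeLists;                // stand-in for _indexedFreeList
    final Deque<long[]> dictionary = new ArrayDeque<>();   // stand-in for the big-chunk dictionary

    @SuppressWarnings("unchecked")
    ChunkAllocatorSketch() {
        indexedFreeLists = new Deque[INDEX_SET_SIZE];
        for (int i = 0; i < INDEX_SET_SIZE; i++) {
            indexedFreeLists[i] = new ArrayDeque<>();
        }
    }

    // Mirrors the new par_get_chunk_of_blocks(): indexed lists first, then dictionary.
    void getChunkOfBlocks(int wordSize, int n, Deque<long[]> out) {
        if (getChunkOfBlocksIFL(wordSize, n, out)) {
            return;                                        // got it from the indexed lists
        }
        getChunkOfBlocksDictionary(wordSize, n, out);      // otherwise split a dictionary chunk
    }

    // Mirrors par_get_chunk_of_blocks_IFL(): satisfy the request from ready-made
    // wordSize blocks, or report failure without touching "out".
    boolean getChunkOfBlocksIFL(int wordSize, int n, Deque<long[]> out) {
        Deque<long[]> list = indexedFreeLists[wordSize];
        if (list.size() < n) {
            return false;
        }
        for (int i = 0; i < n; i++) {
            out.add(list.poll());
        }
        return true;
    }

    // Mirrors par_get_chunk_of_blocks_dictionary() plus get_n_way_chunk_to_split():
    // take one large chunk and hand out wordSize-sized pieces (fresh arrays stand
    // in for slices of the big chunk).
    void getChunkOfBlocksDictionary(int wordSize, int n, Deque<long[]> out) {
        long[] big = dictionary.poll();
        if (big == null) {
            return;
        }
        int pieces = Math.min(n, big.length / wordSize);
        for (int i = 0; i < pieces; i++) {
            out.add(new long[wordSize]);
        }
    }

    public static void main(String[] args) {
        ChunkAllocatorSketch space = new ChunkAllocatorSketch();
        space.indexedFreeLists[8].add(new long[8]);        // too few ready-made blocks
        space.dictionary.add(new long[64]);                // one large dictionary chunk
        Deque<long[]> out = new ArrayDeque<>();
        space.getChunkOfBlocks(8, 4, out);                 // IFL fails, dictionary path splits
        System.out.println("blocks obtained: " + out.size());
    }
}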
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -2343,6 +2343,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
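As the switch above shows, an explicit System.gc() (GCCause::_java_lang_system_gc) only starts a concurrent cycle when ExplicitGCInvokesConcurrent is set, while the new _update_allocation_context_stats_inc cause now always does. A minimal, hypothetical way to observe the existing flag behaviour (not part of this commit):

// Hypothetical demo, not part of the commit. Run with:
//   java -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -verbose:gc ExplicitGcDemo
// and the explicit GC is reported as a concurrent cycle rather than a full STW collection.
public class ExplicitGcDemo {
    public static void main(String[] args) {
        System.gc();   // maps to GCCause::_java_lang_system_gc
    }
}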
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

@@ -95,8 +95,9 @@ void VM_G1IncCollectionPause::doit() {
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
     (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-    "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+     _gc_cause == GCCause::_g1_humongous_allocation ||
+     _gc_cause == GCCause::_update_allocation_context_stats_inc),
+    "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
src/share/vm/gc_interface/gcCause.cpp

@@ -54,7 +54,8 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
     case _no_gc:
src/share/vm/gc_interface/gcCause.hpp

@@ -47,7 +47,8 @@ class GCCause : public AllStatic {
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,

     /* implementation independent, but reserved for GC use */
     _no_gc,
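The strings produced by GCCause::to_string() above (for example "Update Allocation Context Stats") are what Java-side monitoring reports as the GC cause. The listener below is a hypothetical, stand-alone example (not part of this commit) that prints those cause strings via the com.sun.management GC notification API:

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import javax.management.NotificationEmitter;
import javax.management.openmbean.CompositeData;
import com.sun.management.GarbageCollectionNotificationInfo;

// Hypothetical monitoring snippet: prints the GC cause string for every collection.
public class GcCauseListener {
    public static void main(String[] args) throws Exception {
        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
            NotificationEmitter emitter = (NotificationEmitter) bean;
            emitter.addNotificationListener((notification, handback) -> {
                if (notification.getType().equals(
                        GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
                    GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo
                        .from((CompositeData) notification.getUserData());
                    // e.g. "System.gc()", "Allocation Failure", "Update Allocation Context Stats"
                    System.out.println(info.getGcName() + " caused by: " + info.getGcCause());
                }
            }, null, null);
        }
        System.gc();        // trigger at least one collection
        Thread.sleep(1000); // give the asynchronous notifications time to arrive
    }
}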
test/gc/class_unloading/AllocateBeyondMetaspaceSize.java → test/compiler/osr/TestRangeCheck.java

@@ -21,39 +21,32 @@
  * questions.
  */
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
-
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
-    }
-
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
-
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations = 2 * youngGenSize / approxAllocSize;
-
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
-    }
-  }
-}
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+  public static void main(String args[]) {
+    try {
+      test();
+      throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+    } catch (ArrayIndexOutOfBoundsException e) {
+      System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+    }
+  }
+
+  private static void test() {
+    int arr[] = new int[1];
+    int result = 1;
+
+    // provoke OSR compilation
+    for (int i = 0; i < Integer.MAX_VALUE; i++) {
+    }
+
+    if (result > 0 && arr[~result] > 0) {
+      arr[~result] = 0;
+    }
+  }
+}
test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java

@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run driver TestCMSClassUnloadingEnabledHWM
 * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.

@@ -34,9 +34,11 @@
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestCMSClassUnloadingEnabledHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;

@@ -47,15 +49,18 @@ public class TestCMSClassUnloadingEnabledHWM {
       "-Xbootclasspath/a:.",
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
-      "" + MetaspaceSize,
-      "" + YoungGenSize);
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
   }

@@ -87,5 +92,37 @@ public class TestCMSClassUnloadingEnabledHWM {
     testWithCMSClassUnloading();
     testWithoutCMSClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
 }
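The test change above replaces the separate AllocateBeyondMetaspaceSize.java helper with a nested class and passes Outer.Inner.class.getName() as the child VM's main class, which is why the extra @build entry disappears. The snippet below is a hypothetical, stand-alone illustration of that launch pattern using plain ProcessBuilder instead of the jtreg ProcessTools library:

// Hypothetical illustration, not from the commit: a nested class compiles together
// with its outer class, so it can be handed to a child JVM by name without a
// separate source file or build step.
public class NestedMainLauncher {
    public static class Helper {
        public static void main(String[] args) {
            System.out.println("helper got: " + String.join(" ", args));
        }
    }

    public static void main(String[] args) throws Exception {
        String javaBin = System.getProperty("java.home") + "/bin/java";
        ProcessBuilder pb = new ProcessBuilder(
            javaBin,
            "-cp", System.getProperty("java.class.path"),
            NestedMainLauncher.Helper.class.getName(),   // e.g. "NestedMainLauncher$Helper"
            "42");
        pb.inheritIO();
        int exit = pb.start().waitFor();
        System.out.println("child exited with " + exit);
    }
}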
test/gc/class_unloading/TestG1ClassUnloadingHWM.java

@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run driver TestG1ClassUnloadingHWM
 * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.

@@ -34,9 +34,9 @@
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestG1ClassUnloadingHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;

@@ -53,7 +53,7 @@ public class TestG1ClassUnloadingHWM {
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
     return new OutputAnalyzer(pb.start());

@@ -87,5 +87,36 @@ public class TestG1ClassUnloadingHWM {
     testWithG1ClassUnloading();
     testWithoutG1ClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
 }