openanolis / dragonwell8_hotspot
Commit 6fe0e0c8
Authored Feb 12, 2013 by jmasa
8008966: NPG: Inefficient Metaspace counter functions cause large young GC regressions
Reviewed-by: mgerdin, coleenp
Parent 383c051b
Showing 12 changed files with 398 additions and 175 deletions (+398 −175)
src/share/vm/classfile/classLoaderData.cpp                             +2   -0
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                  +2   -1
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp        +2   -1
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp  +2   -1
src/share/vm/gc_implementation/shared/vmGCOperations.cpp               +4   -1
src/share/vm/memory/filemap.cpp                                        +2   -2
src/share/vm/memory/genCollectedHeap.cpp                               +2   -1
src/share/vm/memory/metaspace.cpp                                      +282 -140
src/share/vm/memory/metaspace.hpp                                      +80  -16
src/share/vm/memory/metaspaceCounters.cpp                              +15  -7
src/share/vm/memory/metaspaceCounters.hpp                              +1   -0
src/share/vm/memory/metaspaceShared.cpp                                +4   -5
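Before the per-file hunks, a note on the shape of the fix. MetaspaceAux::used_in_bytes() and capacity_in_bytes() walked the entire ClassLoaderDataGraph, and the collectors called them on every GC, which is what produced the young-GC regressions named in the bug title. The patch replaces those walks with running totals (_allocated_capacity_words, _allocated_used_words) that are updated as Metachunks and Metablocks are handed out or reclaimed, and are merely read during a pause; the old walking code survives as *_slow() methods used to cross-check the totals via MetaspaceAux::verify_metrics(). The following is a minimal standalone sketch of that bookkeeping pattern, not HotSpot code: std::atomic stands in for Atomic::add_ptr, and the expand-lock discipline is only indicated in comments.

#include <atomic>
#include <cstddef>

// Standalone sketch: running totals updated at allocation time and read in
// O(1) at GC time, instead of walking every class loader's metaspace.
// std::atomic stands in for HotSpot's Atomic::add_ptr; this is not VM code.
namespace sketch {

constexpr std::size_t BytesPerWord = sizeof(void*);

class MetaspaceAux {
 public:
  // Chunk-level capacity: callers are assumed to hold the expand lock,
  // mirroring assert_lock_strong(SpaceManager::expand_lock()) in the patch.
  static void inc_capacity(std::size_t words) { allocated_capacity_words_ += words; }
  static void dec_capacity(std::size_t words) { allocated_capacity_words_ -= words; }

  // Block-level "used": updated concurrently by allocating threads, so atomic.
  static void inc_used(std::size_t words) { allocated_used_words_ += words; }
  static void dec_used(std::size_t words) { allocated_used_words_ -= words; }

  // Cheap reads for the GC pause; the slow walk is kept only for asserts.
  static std::size_t allocated_capacity_bytes() { return allocated_capacity_words_ * BytesPerWord; }
  static std::size_t allocated_used_bytes()     { return allocated_used_words_.load() * BytesPerWord; }

 private:
  static std::size_t              allocated_capacity_words_;  // guarded by the expand lock
  static std::atomic<std::size_t> allocated_used_words_;      // updated lock-free
};

std::size_t              MetaspaceAux::allocated_capacity_words_ = 0;
std::atomic<std::size_t> MetaspaceAux::allocated_used_words_{0};

}  // namespace sketch

A collector can then log metadata usage with a single allocated_used_bytes() call, which is exactly the swap the per-collector hunks below make (used_in_bytes() becomes allocated_used_bytes()).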
src/share/vm/classfile/classLoaderData.cpp

@@ -672,6 +672,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
      dead->unload();
      data = data->next();
      // Remove from loader list.
+     // This class loader data will no longer be found
+     // in the ClassLoaderDataGraph.
      if (prev != NULL) {
        prev->set_next(data);
      } else {
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -1304,7 +1304,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  print_heap_before_gc();

-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  HRSPhaseSetter x(HRSPhaseFullGC);
  verify_region_sets_optional();

@@ -1425,6 +1425,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();

  // Note: since we've just done a full GC, concurrent
  // marking is no longer active. Therefore we need not
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

@@ -177,7 +177,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  size_t prev_used = heap->used();
  // Capture metadata size before collection for sizing.
-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  // For PrintGCDetails
  size_t old_gen_prev_used = old_gen->used_in_bytes();

@@ -238,6 +238,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();

  BiasedLocking::restore_marks();
  Threads::gc_epilogue();
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -891,7 +891,7 @@ public:
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::used_in_bytes();
+    _metadata_used  = MetaspaceAux::allocated_used_bytes();
  };

  size_t heap_used() const      { return _heap_used; }

@@ -1026,6 +1026,7 @@ void PSParallelCompact::post_compact()
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
src/share/vm/gc_implementation/shared/vmGCOperations.cpp

@@ -225,7 +225,10 @@ void VM_CollectForMetadataAllocation::doit() {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    }
    heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+    // After a GC try to allocate without expanding.  Could fail
+    // and expansion will be tried below.
+    _result =
+      _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }

  if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
    // If still failing, allow the Metaspace to expand.
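The comment added above makes the post-GC allocation order explicit: retry from the space the metaspace already has, and only raise the high-water mark if that retry also fails. A toy, self-contained C++ illustration of that control flow; the bump allocator and helper names are invented for the example and are not HotSpot APIs.

#include <cstddef>
#include <cstdio>

// Toy stand-in for the allocate / GC / retry-without-expanding / expand order.
namespace toy {
  unsigned char arena[1024];
  std::size_t   committed = 256;   // pretend high-water mark
  std::size_t   top       = 0;

  void* try_allocate(std::size_t size) {         // existing committed space only
    if (top + size > committed) return nullptr;
    void* p = arena + top;
    top += size;
    return p;
  }
  void collect_for_metadata() { /* a real GC could unload classes and free space */ }
  void* expand_and_allocate(std::size_t size) {
    committed = sizeof(arena);                   // raise the high-water mark
    return try_allocate(size);
  }
}

void* satisfy_metadata_allocation(std::size_t size) {
  if (void* p = toy::try_allocate(size)) return p;  // fast path, no GC
  toy::collect_for_metadata();
  if (void* p = toy::try_allocate(size)) return p;  // after GC, retry without expanding
  return toy::expand_and_allocate(size);            // only now allow expansion
}

int main() {
  void* p = satisfy_metadata_allocation(512);
  std::printf("allocated at %p\n", p);
  return 0;
}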
src/share/vm/memory/filemap.cpp

@@ -238,8 +238,8 @@ void FileMapInfo::write_header() {
void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
  align_file_position();
-  size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
-  size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord;
+  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
+  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
  write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
src/share/vm/memory/genCollectedHeap.cpp

@@ -377,7 +377,7 @@ void GenCollectedHeap::do_collection(bool full,
  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

-  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  print_heap_before_gc();

@@ -556,6 +556,7 @@ void GenCollectedHeap::do_collection(bool full,
  if (complete) {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
+    MetaspaceAux::verify_metrics();
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();
src/share/vm/memory/metaspace.cpp

@@ -47,7 +47,6 @@ typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;

@@ -220,7 +219,6 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
  void print_on(outputStream* st);
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {

@@ -298,7 +296,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

-  // Returns true if "word_size" is available in the virtual space
+  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }

@@ -313,6 +311,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
+  size_t free_words_in_vs() const;

  bool initialize();

@@ -449,6 +448,8 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

+  size_t free_bytes();
+
  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

@@ -579,7 +580,11 @@ class SpaceManager : public CHeapObj<mtClass> {
  bool has_small_chunk_limit() { return !vs_list()->is_class(); }

  // Sum of all space in allocated chunks
-  size_t _allocation_total;
+  size_t _allocated_blocks_words;
+
+  // Sum of all allocated chunks
+  size_t _allocated_chunks_words;
+  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager

@@ -635,12 +640,27 @@ class SpaceManager : public CHeapObj<mtClass> {
  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

-  size_t allocation_total() const { return _allocation_total; }
-  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
+  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
+  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
+  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
+  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
+
  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

+  // Increment the per Metaspace and global running sums for Metachunks
+  // by the given size.  This is used when a Metachunk to added to
+  // the in-use list.
+  void inc_size_metrics(size_t words);
+  // Increment the per Metaspace and global running sums Metablocks by the given
+  // size.  This is used when a Metablock is allocated.
+  void inc_used_metrics(size_t words);
+  // Delete the portion of the running sums for this SpaceManager. That is,
+  // the globals running sums for the Metachunks and Metablocks are
+  // decremented for all the Metachunks in-use by this SpaceManager.
+  void dec_total_from_size_metrics();
+
  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,

@@ -686,7 +706,7 @@ class SpaceManager : public CHeapObj<mtClass> {
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
-  void verify_allocation_total();
+  void verify_allocated_blocks_words();
#endif
};

@@ -797,6 +817,9 @@ size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

+size_t VirtualSpaceNode::free_words_in_vs() const {
+  return pointer_delta(end(), top(), sizeof(MetaWord));
+}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all

@@ -1071,6 +1094,10 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  link_vs(class_entry, rs.size()/BytesPerWord);
}

+size_t VirtualSpaceList::free_bytes() {
+  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
+}
+
// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

@@ -1211,9 +1238,9 @@ bool VirtualSpaceList::contains(const void *ptr) {
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
-// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used
+// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
-// if really needed.  MinHeapFreeRatio is used to calculate how much
+// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing

@@ -1248,7 +1275,11 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
}

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
+  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
+
  // If the user wants a limit, impose one.
+  size_t max_metaspace_size_bytes = MaxMetaspaceSize;
+  size_t metaspace_size_bytes = MetaspaceSize;
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
      MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
    return false;

@@ -1260,57 +1291,48 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }
-  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
-  if (vsl->capacity_words_sum() < metaspace_size_words ||
+  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_words);
+    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
-    if (vsl->capacity_words_sum() < capacity_until_GC()) {
+    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                               " capacity_until_GC " SIZE_FORMAT
-                               " capacity_words_sum " SIZE_FORMAT
-                               " used_words_sum " SIZE_FORMAT
-                               " free chunks " SIZE_FORMAT
-                               " free chunks count %d",
+                               " allocated_capacity_bytes " SIZE_FORMAT,
                               word_size,
                               capacity_until_GC(),
-                               vsl->capacity_words_sum(),
-                               vsl->used_words_sum(),
-                               vsl->chunk_manager()->free_chunks_total(),
-                               vsl->chunk_manager()->free_chunks_count());
+                               MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}

// Variables are in bytes

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

-  VirtualSpaceList *vsl = Metaspace::space_list();
-
-  size_t capacity_after_gc = vsl->capacity_bytes_sum();
-  // Check to see if these two can be calculated without walking the CLDG
-  size_t used_after_gc = vsl->used_bytes_sum();
-  size_t capacity_until_GC = vsl->capacity_bytes_sum();
-  size_t free_after_gc = capacity_until_GC - used_after_gc;
+  // Until a faster way of calculating the "used" quantity is implemented,
+  // use "capacity".
+  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

@@ -1323,45 +1345,34 @@ void MetaspaceGC::compute_new_size() {
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
-    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
-    double d_free_after_gc = free_after_gc / (double) K;
    gclog_or_tty->print_cr("  "
-                  "   free_after_gc       : %6.1fK"
-                  "   used_after_gc       : %6.1fK"
-                  "   capacity_after_gc   : %6.1fK"
-                  "   metaspace HWM       : %6.1fK",
-                  free_after_gc / (double) K,
-                  used_after_gc / (double) K,
-                  capacity_after_gc / (double) K,
-                  capacity_until_GC / (double) K);
-    gclog_or_tty->print_cr("  "
-                  "  free_percentage: %6.2f",
-                  free_percentage);
+                  "  used_after_gc       : %6.1fKB",
+                  used_after_gc / (double) K);
  }

+  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
-      size_t expand_words = expand_bytes / BytesPerWord;
-      MetaspaceGC::inc_capacity_until_GC(expand_words);
+      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
+      size_t new_capacity_until_GC = capacity_until_GC;
      gclog_or_tty->print_cr("    expanding:"
-                    "  minimum_desired_capacity: %6.1fK"
-                    "  expand_words: %6.1fK"
-                    "  MinMetaspaceExpansion: %6.1fK"
-                    "  new metaspace HWM:  %6.1fK",
+                    "  minimum_desired_capacity: %6.1fKB"
+                    "  expand_bytes: %6.1fKB"
+                    "  MinMetaspaceExpansion: %6.1fKB"
+                    "  new metaspace HWM:  %6.1fKB",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,

@@ -1371,11 +1382,10 @@ void MetaspaceGC::compute_new_size() {
  }

  // No expansion, now see if we want to shrink
-  size_t shrink_words = 0;
  // We would never want to shrink more than this
-  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
-  assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
-    max_shrink_words));
+  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
+  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
+    max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {

@@ -1385,17 +1395,15 @@ void MetaspaceGC::compute_new_size() {
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity, MetaspaceSize);
-    if (PrintGC && Verbose) {
+    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
-                             "  capacity_until_GC: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK",
-                             capacity_until_GC / (double) K,
+                             "  minimum_desired_capacity: %6.1fKB"
+                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

@@ -1405,17 +1413,17 @@ void MetaspaceGC::compute_new_size() {
    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
-      shrink_words = capacity_until_GC - maximum_desired_capacity;
+      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
-      shrink_words = shrink_words / 100 * current_shrink_factor;
-      assert(shrink_words <= max_shrink_words,
+      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+      assert(shrink_bytes <= max_shrink_bytes,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
-          shrink_words, max_shrink_words));
+          shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {

@@ -1429,11 +1437,11 @@ void MetaspaceGC::compute_new_size() {
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
-                      "  shrink_words: %.1fK"
+                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
-                      shrink_words / (double) K,
+                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);

@@ -1441,23 +1449,11 @@ void MetaspaceGC::compute_new_size() {
    }
  }

  // Don't shrink unless it's significant
-  if (shrink_words >= MinMetaspaceExpansion) {
-    VirtualSpaceNode* csp = vsl->current_virtual_space();
-    size_t available_to_shrink = csp->capacity_words_in_vs() - csp->used_words_in_vs();
-    shrink_words = MIN2(shrink_words, available_to_shrink);
-    csp->shrink_by(shrink_words);
-    MetaspaceGC::dec_capacity_until_GC(shrink_words);
-    if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
-      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
-    }
+  if (shrink_bytes >= MinMetaspaceExpansion &&
+      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
+    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  }
-  assert(used_after_gc <= vsl->capacity_bytes_sum(),
-         "sanity check");
}

// Metadebug methods

@@ -1860,18 +1856,28 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
}

size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t sum = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    Metachunk* chunk = chunks_in_use(i);
-    while (chunk != NULL) {
-      // Just changed this sum += chunk->capacity_word_size();
-      // sum += chunk->word_size() - Metachunk::overhead();
-      sum += chunk->capacity_word_size();
-      chunk = chunk->next();
+  // For CMS use "allocated_chunks_words()" which does not need the
+  // Metaspace lock.  For the other collectors sum over the
+  // lists.  Use both methods as a check that "allocated_chunks_words()"
+  // is correct.  That is, sum_capacity_in_chunks() is too expensive
+  // to use in the product and allocated_chunks_words() should be used
+  // but allow for  checking that allocated_chunks_words() returns the same
+  // value as sum_capacity_in_chunks_in_use() which is the definitive
+  // answer.
+  if (UseConcMarkSweepGC) {
+    return allocated_chunks_words();
+  } else {
+    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+    size_t sum = 0;
+    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+      Metachunk* chunk = chunks_in_use(i);
+      while (chunk != NULL) {
+        // Just changed this sum += chunk->capacity_word_size();
+        // sum += chunk->word_size() - Metachunk::overhead();
+        sum += chunk->capacity_word_size();
+        chunk = chunk->next();
+      }
    }
+    return sum;
  }
-  return sum;
}

size_t SpaceManager::sum_count_in_chunks_in_use() {

@@ -2029,12 +2035,44 @@ void SpaceManager::print_on(outputStream* st) const {
SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
  _vs_list(vs_list),
-  _allocation_total(0),
+  _allocated_blocks_words(0),
+  _allocated_chunks_words(0),
+  _allocated_chunks_count(0),
  _lock(lock)
{
  initialize();
}

+void SpaceManager::inc_size_metrics(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Total of allocated Metachunks and allocated Metachunks count
+  // for each SpaceManager
+  _allocated_chunks_words = _allocated_chunks_words + words;
+  _allocated_chunks_count++;
+  // Global total of capacity in allocated Metachunks
+  MetaspaceAux::inc_capacity(words);
+  // Global total of allocated Metablocks.
+  // used_words_slow() includes the overhead in each
+  // Metachunk so include it in the used when the
+  // Metachunk is first added (so only added once per
+  // Metachunk).
+  MetaspaceAux::inc_used(Metachunk::overhead());
+}
+
+void SpaceManager::inc_used_metrics(size_t words) {
+  // Add to the per SpaceManager total
+  Atomic::add_ptr(words, &_allocated_blocks_words);
+  // Add to the global total
+  MetaspaceAux::inc_used(words);
+}
+
+void SpaceManager::dec_total_from_size_metrics() {
+  MetaspaceAux::dec_capacity(allocated_chunks_words());
+  MetaspaceAux::dec_used(allocated_blocks_words());
+  // Also deduct the overhead per Metachunk
+  MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead());
+}
+
void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {

@@ -2073,7 +2111,10 @@ void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
SpaceManager::~SpaceManager() {
  // This call this->_lock which can't be done while holding expand_lock()
-  const size_t in_use_before = sum_capacity_in_chunks_in_use();
+  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
+    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
+            " allocated_chunks_words() " SIZE_FORMAT,
+            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

@@ -2082,6 +2123,8 @@ SpaceManager::~SpaceManager() {
  chunk_manager->slow_locked_verify();

+  dec_total_from_size_metrics();
+
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);

@@ -2092,7 +2135,7 @@ SpaceManager::~SpaceManager() {
  // Have to update before the chunks_in_use lists are emptied
  // below.
-  chunk_manager->inc_free_chunks_total(in_use_before,
+  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
                                       sum_count_in_chunks_in_use());

  // Add all the chunks in use by this space manager

@@ -2158,7 +2201,6 @@ SpaceManager::~SpaceManager() {
                         chunk_manager->humongous_dictionary()->total_count(),
                         chunk_size_name(HumongousIndex));
  }
  set_chunks_in_use(HumongousIndex, NULL);
  chunk_manager->slow_locked_verify();
}

@@ -2238,13 +2280,18 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  }

+  // Add to the running sum of capacity
+  inc_size_metrics(new_chunk->word_size());
+
  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
+    if (vs_list() != NULL) {
      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t word_size,

@@ -2314,7 +2361,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
  // of memory if this returns null.
  if (DumpSharedSpaces) {
    assert(current_chunk() != NULL, "should never happen");
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
  if (current_chunk() != NULL) {

@@ -2325,7 +2372,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
    result = grow_and_allocate(word_size);
  }
  if (result > 0) {
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

@@ -2359,20 +2406,14 @@ void SpaceManager::verify_chunk_size(Metachunk* chunk) {
}

#ifdef ASSERT
-void SpaceManager::verify_allocation_total() {
+void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
-  if (SafepointSynchronize::is_at_safepoint()) {
-    gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
-                           " sum_used_in_chunks_in_use " SIZE_FORMAT,
-                           this,
-                           allocation_total(),
-                           sum_used_in_chunks_in_use());
-  }
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  assert(allocation_total() == sum_used_in_chunks_in_use(),
+  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
+    "Verification can fail if the applications is running");
+  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
    err_msg("allocation total is not consistent " SIZE_FORMAT
            " vs " SIZE_FORMAT,
-            allocation_total(), sum_used_in_chunks_in_use()));
+            allocated_blocks_words(), sum_used_in_chunks_in_use()));
}
#endif

@@ -2428,14 +2469,65 @@ void SpaceManager::mangle_freed_chunks() {
// MetaspaceAux

-size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::_allocated_capacity_words = 0;
+size_t MetaspaceAux::_allocated_used_words = 0;
+
+size_t MetaspaceAux::free_bytes() {
+  size_t result = 0;
+  if (Metaspace::class_space_list() != NULL) {
+    result = result + Metaspace::class_space_list()->free_bytes();
+  }
+  if (Metaspace::space_list() != NULL) {
+    result = result + Metaspace::space_list()->free_bytes();
+  }
+  return result;
+}
+
+void MetaspaceAux::dec_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  assert(words <= _allocated_capacity_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_capacity_words " SIZE_FORMAT,
+            words, _allocated_capacity_words));
+  _allocated_capacity_words = _allocated_capacity_words - words;
+}
+
+void MetaspaceAux::inc_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Needs to be atomic
+  _allocated_capacity_words = _allocated_capacity_words + words;
+}
+
+void MetaspaceAux::dec_used(size_t words) {
+  assert(words <= _allocated_used_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_used_words " SIZE_FORMAT,
+            words, _allocated_used_words));
+  // For CMS deallocation of the Metaspaces occurs during the
+  // sweep which is a concurrent phase.  Protection by the expand_lock()
+  // is not enough since allocation is on a per Metaspace basis
+  // and protected by the Metaspace lock.
+  jlong minus_words = (jlong) - (jlong) words;
+  Atomic::add_ptr(minus_words, &_allocated_used_words);
+}
+
+void MetaspaceAux::inc_used(size_t words) {
+  // _allocated_used_words tracks allocations for
+  // each piece of metadata.  Those allocations are
+  // generally done concurrently by different application
+  // threads so must be done atomically.
+  Atomic::add_ptr(words, &_allocated_used_words);
+}
+
+size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t used = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
-    // Sum allocation_total for each metaspace
+    // Sum allocated_blocks_words for each metaspace
    if (msp != NULL) {
-      used += msp->used_words(mdtype);
+      used += msp->used_words_slow(mdtype);
    }
  }
  return used * BytesPerWord;

@@ -2453,13 +2545,15 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  return free * BytesPerWord;
}

-size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t capacity = free_chunks_total(mdtype);
+size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  // Don't count the space in the freelists.  That space will be
+  // added to the capacity calculation as needed.
+  size_t capacity = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
-      capacity += msp->capacity_words(mdtype);
+      capacity += msp->capacity_words_slow(mdtype);
    }
  }
  return capacity * BytesPerWord;

@@ -2486,23 +2580,30 @@ size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype)
  return free_chunks_total(mdtype) * BytesPerWord;
}

+size_t MetaspaceAux::free_chunks_total() {
+  return free_chunks_total(Metaspace::ClassType) +
+         free_chunks_total(Metaspace::NonClassType);
+}
+
+size_t MetaspaceAux::free_chunks_total_in_bytes() {
+  return free_chunks_total() * BytesPerWord;
+}
+
void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  gclog_or_tty->print(", [Metaspace:");
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
+                        "("  SIZE_FORMAT ")",
                        prev_metadata_used,
-                        used_in_bytes(),
-                        capacity_in_bytes(),
+                        allocated_capacity_bytes(),
                        reserved_in_bytes());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K" "/" SIZE_FORMAT "K" ")",
+                        "("  SIZE_FORMAT "K" ")",
                        prev_metadata_used / K,
-                        used_in_bytes() / K,
-                        capacity_in_bytes() / K,
+                        allocated_capacity_bytes() / K,
                        reserved_in_bytes() / K);
  }

@@ -2517,23 +2618,30 @@ void MetaspaceAux::print_on(outputStream* out) {
  out->print_cr(" Metaspace total "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+#if 0
+// The calls to capacity_bytes_slow() and used_bytes_slow() cause
+// lock ordering assertion failures with some collectors.  Do
+// not include this code until the lock ordering is fixed.
+  if (PrintGCDetails && Verbose) {
  out->print_cr("  data space     "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
+                capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K);
  out->print_cr("  class space    "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
+                capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K);
+  }
+#endif
}

// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
-  size_t capacity_bytes = capacity_in_bytes(mdtype);
-  size_t used_bytes = used_in_bytes(mdtype);
+  size_t capacity_bytes = capacity_bytes_slow(mdtype);
+  size_t used_bytes = used_bytes_slow(mdtype);
  size_t free_bytes = free_in_bytes(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;

@@ -2606,6 +2714,36 @@ void MetaspaceAux::verify_free_chunks() {
  Metaspace::class_space_list()->chunk_manager()->verify();
}

+void MetaspaceAux::verify_capacity() {
+#ifdef ASSERT
+  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+  // For purposes of the running sum of used, verify against capacity
+  size_t capacity_in_use_bytes = capacity_bytes_slow();
+  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
+    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+            " capacity_bytes_slow()" SIZE_FORMAT,
+            running_sum_capacity_bytes, capacity_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_used() {
+#ifdef ASSERT
+  size_t running_sum_used_bytes = allocated_used_bytes();
+  // For purposes of the running sum of used, verify against capacity
+  size_t used_in_use_bytes = used_bytes_slow();
+  assert(allocated_used_bytes() == used_in_use_bytes,
+    err_msg("allocated_used_bytes() " SIZE_FORMAT
+            " used_bytes_slow()()" SIZE_FORMAT,
+            allocated_used_bytes(), used_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_metrics() {
+  verify_capacity();
+  verify_used();
+}
+
// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;

@@ -2755,8 +2893,8 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype)
  MetaWord* result;
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
-  MetaspaceGC::inc_capacity_until_GC(delta_words);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
+  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());

@@ -2774,8 +2912,8 @@ char* Metaspace::bottom() const {
  return (char*)vsm()->current_chunk()->bottom();
}

-size_t Metaspace::used_words(MetadataType mdtype) const {
-  // return vsm()->allocation_total();
+size_t Metaspace::used_words_slow(MetadataType mdtype) const {
+  // return vsm()->allocated_used_words();
  return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use()
                             : vsm()->sum_used_in_chunks_in_use();  // includes overhead!
}

@@ -2790,11 +2928,19 @@ size_t Metaspace::free_words(MetadataType mdtype) const {
// have been made. Don't include space in the global freelist and
// in the space available in the dictionary which
// is already counted in some chunk.
-size_t Metaspace::capacity_words(MetadataType mdtype) const {
+size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use()
                             : vsm()->sum_capacity_in_chunks_in_use();
}

+size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
+  return used_words_slow(mdtype) * BytesPerWord;
+}
+
+size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
+  return capacity_words_slow(mdtype) * BytesPerWord;
+}
+
void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");

@@ -2921,10 +3067,6 @@ void Metaspace::verify() {
}

void Metaspace::dump(outputStream* const out) const {
-  if (UseMallocOnly) {
-    // Just print usage for now
-    out->print_cr("usage %d", used_words(Metaspace::NonClassType));
-  }
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
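One detail of the new MetaspaceAux::dec_used() above is worth spelling out: CMS releases metaspaces during its concurrent sweep, so the used counter cannot rely on the expand lock and is instead decremented by atomically adding a negative delta. A small standalone C++ illustration of the same idiom, with std::atomic standing in for the patch's Atomic::add_ptr on a jlong-cast value:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch of the "decrement by adding a negative delta" idiom used for
// _allocated_used_words: one atomic counter serves both concurrent
// allocation (inc) and concurrent CMS sweeping (dec).
static std::atomic<std::intptr_t> allocated_used_words{0};

void inc_used(std::size_t words) {
  allocated_used_words.fetch_add(static_cast<std::intptr_t>(words));
}

void dec_used(std::size_t words) {
  // Mirrors: jlong minus_words = (jlong) - (jlong) words;
  //          Atomic::add_ptr(minus_words, &_allocated_used_words);
  std::intptr_t minus_words = -static_cast<std::intptr_t>(words);
  allocated_used_words.fetch_add(minus_words);
}

int main() {
  inc_used(128);
  dec_used(32);
  assert(allocated_used_words.load() == 96);
  return 0;
}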
src/share/vm/memory/metaspace.hpp

@@ -111,6 +111,10 @@ class Metaspace : public CHeapObj<mtClass> {
  SpaceManager* _class_vsm;
  SpaceManager* class_vsm() const { return _class_vsm; }

+  // Allocate space for metadata of type mdtype. This is space
+  // within a Metachunk and is used by
+  //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
+  // which returns a Metablock.
  MetaWord* allocate(size_t word_size, MetadataType mdtype);

  // Virtual Space lists for both classes and other metadata

@@ -133,11 +137,14 @@ class Metaspace : public CHeapObj<mtClass> {
  static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }

  char* bottom() const;
-  size_t used_words(MetadataType mdtype) const;
+  size_t used_words_slow(MetadataType mdtype) const;
  size_t free_words(MetadataType mdtype) const;
-  size_t capacity_words(MetadataType mdtype) const;
+  size_t capacity_words_slow(MetadataType mdtype) const;
  size_t waste_words(MetadataType mdtype) const;

+  size_t used_bytes_slow(MetadataType mdtype) const;
+  size_t capacity_bytes_slow(MetadataType mdtype) const;
+
  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
                             bool read_only, MetadataType mdtype, TRAPS);
  void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);

@@ -161,28 +168,81 @@ class Metaspace : public CHeapObj<mtClass> {
class MetaspaceAux : AllStatic {

  // Statistics for class space and data space in metaspace.
-  static size_t used_in_bytes(Metaspace::MetadataType mdtype);
+  // These methods iterate over the classloader data graph
+  // for the given Metaspace type.  These are slow.
+  static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
-  static size_t capacity_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
+
+  // Iterates over the virtual space list.
  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);

  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);

- public:
-  // Total of space allocated to metadata in all Metaspaces
-  static size_t used_in_bytes() {
-    return used_in_bytes(Metaspace::ClassType) +
-           used_in_bytes(Metaspace::NonClassType);
-  }
+  // Running sum of space in all Metachunks that has been
+  // allocated to a Metaspace.  This is used instead of
+  // iterating over all the classloaders
+  static size_t _allocated_capacity_words;
+  // Running sum of space in all Metachunks that have
+  // are being used for metadata.
+  static size_t _allocated_used_words;
+
+ public:
+  // Decrement and increment _allocated_capacity_words
+  static void dec_capacity(size_t words);
+  static void inc_capacity(size_t words);
+
+  // Decrement and increment _allocated_used_words
+  static void dec_used(size_t words);
+  static void inc_used(size_t words);
+
+  // Total of space allocated to metadata in all Metaspaces.
+  // This sums the space used in each Metachunk by
+  // iterating over the classloader data graph
+  static size_t used_bytes_slow() {
+    return used_bytes_slow(Metaspace::ClassType) +
+           used_bytes_slow(Metaspace::NonClassType);
+  }

-  // Total of available space in all Metaspaces
-  // Total of capacity allocated to all Metaspaces.  This includes
-  // space in Metachunks not yet allocated and in the Metachunk
-  // freelist.
-  static size_t capacity_in_bytes() {
-    return capacity_in_bytes(Metaspace::ClassType) +
-           capacity_in_bytes(Metaspace::NonClassType);
-  }
+  // Used by MetaspaceCounters
+  static size_t free_chunks_total();
+  static size_t free_chunks_total_in_bytes();
+
+  static size_t allocated_capacity_words() {
+    return _allocated_capacity_words;
+  }
+  static size_t allocated_capacity_bytes() {
+    return _allocated_capacity_words * BytesPerWord;
+  }
+  static size_t allocated_used_words() {
+    return _allocated_used_words;
+  }
+  static size_t allocated_used_bytes() {
+    return _allocated_used_words * BytesPerWord;
+  }
+
+  static size_t free_bytes();
+
+  // Total capacity in all Metaspaces
+  static size_t capacity_bytes_slow() {
+#ifdef PRODUCT
+    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+             " class_capacity + non_class_capacity " SIZE_FORMAT
+             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+             allocated_capacity_bytes(), class_capacity + non_class_capacity,
+             class_capacity, non_class_capacity));
+    return class_capacity + non_class_capacity;
+  }

  // Total space reserved in all Metaspaces

@@ -201,6 +261,11 @@ class MetaspaceAux : AllStatic {
  static void print_waste(outputStream* out);
  static void dump(outputStream* out);
  static void verify_free_chunks();
+  // Checks that the values returned by allocated_capacity_bytes() and
+  // capacity_bytes_slow() are the same.
+  static void verify_capacity();
+  static void verify_used();
+  static void verify_metrics();
};

// Metaspace are deallocated when their class loader are GC'ed.

@@ -235,7 +300,6 @@ class MetaspaceGC : AllStatic {
 public:

  static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; }
  static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
  static void dec_capacity_until_GC(size_t v) {
    _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
src/share/vm/memory/metaspaceCounters.cpp

@@ -29,6 +29,16 @@
MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;

+size_t MetaspaceCounters::calc_total_capacity() {
+  // The total capacity is the sum of
+  //   1) capacity of Metachunks in use by all Metaspaces
+  //   2) unused space at the end of each Metachunk
+  //   3) space in the freelist
+  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
+  return total_capacity;
+}
+
MetaspaceCounters::MetaspaceCounters() :
    _capacity(NULL),
    _used(NULL),

@@ -36,8 +46,8 @@ MetaspaceCounters::MetaspaceCounters() :
  if (UsePerfData) {
    size_t min_capacity = MetaspaceAux::min_chunk_size();
    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = MetaspaceAux::capacity_in_bytes();
-    size_t used = MetaspaceAux::used_in_bytes();
+    size_t curr_capacity = calc_total_capacity();
+    size_t used = MetaspaceAux::allocated_used_bytes();

    initialize(min_capacity, max_capacity, curr_capacity, used);
  }

@@ -82,15 +92,13 @@ void MetaspaceCounters::initialize(size_t min_capacity,
void MetaspaceCounters::update_capacity() {
  assert(UsePerfData, "Should not be called unless being used");
  assert(_capacity != NULL, "Should be initialized");
-  size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
-  _capacity->set_value(capacity_in_bytes);
+  size_t total_capacity = calc_total_capacity();
+  _capacity->set_value(total_capacity);
}

void MetaspaceCounters::update_used() {
  assert(UsePerfData, "Should not be called unless being used");
  assert(_used != NULL, "Should be initialized");
-  size_t used_in_bytes = MetaspaceAux::used_in_bytes();
+  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
  _used->set_value(used_in_bytes);
}
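The capacity that MetaspaceCounters now publishes is assembled from three O(1) terms instead of a classloader walk: MetaspaceAux::allocated_capacity_bytes(), MetaspaceAux::free_bytes() and MetaspaceAux::free_chunks_total_in_bytes(). A worked example of that sum follows; the terms mirror calc_total_capacity() in the patch, but the byte figures are invented for illustration.

#include <cstddef>
#include <cstdio>

// Illustration only: the three terms mirror calc_total_capacity(); the
// figures below are made up.
int main() {
  std::size_t allocated_capacity_bytes   = 6u * 1024 * 1024; // Metachunks handed out to Metaspaces
  std::size_t free_bytes                 = 1u * 1024 * 1024; // as reported by MetaspaceAux::free_bytes()
  std::size_t free_chunks_total_in_bytes = 2u * 1024 * 1024; // chunks sitting in the free lists

  std::size_t total_capacity = allocated_capacity_bytes
                             + free_bytes
                             + free_chunks_total_in_bytes;
  std::printf("capacity published to the perf counters: %zu bytes\n", total_capacity);
  return 0;
}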
src/share/vm/memory/metaspaceCounters.hpp

@@ -37,6 +37,7 @@ class MetaspaceCounters: public CHeapObj<mtClass> {
                  size_t max_capacity,
                  size_t curr_capacity,
                  size_t used);
+  size_t calc_total_capacity();
 public:
  MetaspaceCounters();
  ~MetaspaceCounters();
src/share/vm/memory/metaspaceShared.cpp

@@ -376,18 +376,17 @@ void VM_PopulateDumpSharedSpace::doit() {
  const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
  Metaspace* ro_space = _loader_data->ro_metaspace();
  Metaspace* rw_space = _loader_data->rw_metaspace();
-  const size_t BPW = BytesPerWord;

  // Allocated size of each space (may not be all occupied)
-  const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
  const size_t md_alloced = md_end-md_low;
  const size_t mc_alloced = mc_end-mc_low;
  const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;

  // Occupied size of each space.
-  const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
  const size_t md_bytes = size_t(md_top - md_low);
  const size_t mc_bytes = size_t(mc_top - mc_low);