openanolis / dragonwell8_hotspot

Commit 7b1f4848 (Merge), authored May 04, 2012 by jcoomes
Parents: 1e026794, d821a687

Showing 27 changed files with 1361 additions and 1064 deletions (+1361, -1064)
src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp                        +1    -1
src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp                        +2    -2
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp          +181  -179
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp          +13   -13
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp     +28   -28
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp     +3    -3
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp                         +2    -1
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp                         +12   -12
src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp                     +1    -1
src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp                     +9    -9
src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp                     +11   -11
src/share/vm/gc_implementation/g1/concurrentMark.cpp                                     +57   -150
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                                    +14   -21
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp                                  +7    -12
src/share/vm/gc_implementation/g1/heapRegion.cpp                                         +0    -5
src/share/vm/gc_implementation/g1/heapRegion.hpp                                         +0    -18
src/share/vm/gc_implementation/g1/heapRegion.inline.hpp                                  +0    -1
src/share/vm/gc_implementation/shared/allocationStats.hpp                                +45   -45
src/share/vm/memory/binaryTreeDictionary.cpp                                             +442  -356
src/share/vm/memory/binaryTreeDictionary.hpp                                             +329  -0
src/share/vm/memory/freeBlockDictionary.cpp                                              +14   -6
src/share/vm/memory/freeBlockDictionary.hpp                                              +25   -26
src/share/vm/memory/freeList.cpp                                                         +87   -77
src/share/vm/memory/freeList.hpp                                                         +74   -80
src/share/vm/memory/generationSpec.cpp                                                   +3    -3
src/share/vm/precompiled/precompiled.hpp                                                 +0    -3
src/share/vm/runtime/vmStructs.cpp                                                       +1    -1
src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp
@@ -38,7 +38,7 @@
 CMSPermGen::CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
                        CardTableRS* ct,
-                       FreeBlockDictionary::DictionaryChoice dictionaryChoice) {
+                       FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) {
   CMSPermGenGen* g = new CMSPermGenGen(rs, initial_byte_size, -1, ct);
   if (g == NULL) {
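This one-line signature change is the pattern the whole merge applies: the CMS free-list machinery (FreeBlockDictionary, FreeList, BinaryTreeDictionary, TreeChunk) is generalized from being hard-wired to FreeChunk into class templates over the chunk type, so every declaration now names the instantiation explicitly. A minimal standalone sketch of the before/after shape (hypothetical rendering, not the HotSpot sources):

#include <cstddef>

// Before: FreeChunk was baked in:   class FreeBlockDictionary { ... };
// After:  the chunk type is a template parameter:
template <class Chunk>
class FreeBlockDictionary {
 public:
  enum DictionaryChoice { dictionaryBinaryTree };   // other choices elided
  virtual Chunk* get_chunk(size_t size) = 0;        // assumed interface
  virtual void   return_chunk(Chunk* chunk) = 0;    // assumed interface
  virtual ~FreeBlockDictionary() {}
};

struct FreeChunk;  // CMS's chunk type; only pointers to it are needed here

// Call sites change from   FreeBlockDictionary::DictionaryChoice
// to                       FreeBlockDictionary<FreeChunk>::DictionaryChoice
typedef FreeBlockDictionary<FreeChunk>::DictionaryChoice CMSChoice;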
src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp
@@ -45,7 +45,7 @@ class CMSPermGen: public PermGen {
  public:
   CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
-             CardTableRS* ct, FreeBlockDictionary::DictionaryChoice);
+             CardTableRS* ct, FreeBlockDictionary<FreeChunk>::DictionaryChoice);
 
   HeapWord* mem_allocate(size_t size);
@@ -65,7 +65,7 @@ public:
       // regarding not using adaptive free lists for a perm gen.
       ConcurrentMarkSweepGeneration(rs, initial_byte_size, // MinPermHeapExapnsion
         level, ct, false /* use adaptive freelists */,
-        (FreeBlockDictionary::DictionaryChoice) CMSDictionaryChoice)
+        (FreeBlockDictionary<FreeChunk>::DictionaryChoice) CMSDictionaryChoice)
   {}
 
   void initialize_performance_counters();
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
(diff collapsed in the source view; +181 -179)
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 
-#include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
+#include "memory/binaryTreeDictionary.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
+#include "memory/freeList.hpp"
 #include "memory/space.hpp"
 
 // Classes in support of keeping track of promotions into a non-Contiguous
@@ -129,10 +129,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Linear allocation blocks
   LinearAllocBlock _smallLinearAllocBlock;
 
-  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
-  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks
+  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
+  FreeBlockDictionary<FreeChunk>* _dictionary;  // ptr to dictionary for large size blocks
 
-  FreeList _indexedFreeList[IndexSetSize];
+  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                        // indexed array for small size blocks
   // allocation stategy
   bool       _fitStrategy;      // Use best fit strategy.
@@ -169,7 +169,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // If the count of "fl" is negative, it's absolute value indicates a
   // number of free chunks that had been previously "borrowed" from global
   // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
+  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
 
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
@@ -215,7 +215,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // and return it.  The split off remainder is returned to
   // the free lists.  The old name for getFromListGreater
   // was lookInListGreater.
-  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
+  FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
 
   // Get a chunk in the indexed free list or dictionary,
   // by considering a larger chunk and splitting it.
   FreeChunk* getChunkFromGreater(size_t numWords);
@@ -286,10 +286,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Constructor...
   CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                            bool use_adaptive_freelists,
-                           FreeBlockDictionary::DictionaryChoice);
+                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
   // accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
-  FreeBlockDictionary* dictionary() const { return _dictionary; }
+  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void  set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
@@ -499,7 +499,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Verify that the given chunk is in the free lists:
   // i.e. either the binary tree dictionary, the indexed free lists
   // or the linear allocation block.
-  bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  bool verify_chunk_in_free_list(FreeChunk* fc) const;
   // Verify that the given chunk is the linear allocation block
   bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
 
   // Do some basic checks on the the free lists.
@@ -608,7 +608,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   void coalDeath(size_t size);
   void smallSplitBirth(size_t size);
   void smallSplitDeath(size_t size);
-  void splitBirth(size_t size);
+  void split_birth(size_t size);
   void splitDeath(size_t size);
   void split(size_t from, size_t to1);
@@ -622,7 +622,7 @@ class CFLS_LAB : public CHeapObj {
   CompactibleFreeListSpace* _cfls;
 
   // Our local free lists.
-  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
+  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
 
   // Initialized from a command-line arg.
@@ -635,7 +635,7 @@ class CFLS_LAB : public CHeapObj {
   size_t _num_blocks[CompactibleFreeListSpace::IndexSetSize];
 
   // Internal work method
-  void get_from_global_pool(size_t word_sz, FreeList* fl);
+  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
 
  public:
   CFLS_LAB(CompactibleFreeListSpace* cfls);
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -188,7 +188,7 @@ class CMSParGCThreadState: public CHeapObj {
 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
      ReservedSpace rs, size_t initial_byte_size, int level,
      CardTableRS* ct, bool use_adaptive_freelists,
-     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
+     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _debug_collection_type(Concurrent_collection_type)
@@ -1026,7 +1026,7 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
   // its mark-bit or P-bits not yet set. Such objects need
   // to be safely navigable by block_start().
   assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
+  assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
   collector()->direct_allocated(res, adjustedSize);
   _direct_allocated_words += adjustedSize;
   // allocation counters
@@ -1391,7 +1391,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
   oop obj = oop(obj_ptr);
   OrderAccess::storestore();
   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   // IMPORTANT: See note on object initialization for CMS above.
   // Otherwise, copy the object.  Here we must be careful to insert the
   // klass pointer last, since this marks the block as an allocated object.
@@ -1400,7 +1400,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
   // Restore the mark word copied above.
   obj->set_mark(m);
   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
   if (UseCompressedOops) {
@@ -1421,7 +1421,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
     promoInfo->track((PromotedObject*)obj, old->klass());
   }
   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   assert(old->is_oop(), "Will use and dereference old klass ptr below");
 
   // Finally, install the klass pointer (this should be volatile).
@@ -2034,7 +2034,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
          pointer_delta(cms_space->end(), cms_space->compaction_top())
          * HeapWordSize,
          "All the free space should be compacted into one chunk at top");
-  assert(cms_space->dictionary()->totalChunkSize(
+  assert(cms_space->dictionary()->total_chunk_size(
                                     debug_only(cms_space->freelistLock())) == 0 ||
          cms_space->totalSizeInIndexedFreeLists() == 0,
          "All the free space should be in a single chunk");
@@ -6131,7 +6131,7 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
   HeapWord*  minAddr        = _cmsSpace->bottom();
   HeapWord*  largestAddr    =
-    (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
+    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
   if (largestAddr == NULL) {
     // The dictionary appears to be empty.  In this case
     // try to coalesce at the end of the heap.
@@ -7906,7 +7906,7 @@ SweepClosure::SweepClosure(CMSCollector* collector,
   _last_fc = NULL;
 
   _sp->initializeIndexedFreeListArrayReturnedBytes();
-  _sp->dictionary()->initializeDictReturnedBytes();
+  _sp->dictionary()->initialize_dict_returned_bytes();
   )
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
@@ -7954,13 +7954,13 @@ SweepClosure::~SweepClosure() {
     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returnedBytes);
+      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
+      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
+      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
                           indexListReturnedBytes);
       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
-                             dictReturnedBytes);
+                             dict_returned_bytes);
     }
   }
   if (CMSTraceSweeper) {
@@ -7985,9 +7985,9 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
   if (CMSTestInFreeList) {
     if (freeRangeInFreeLists) {
       FreeChunk* fc = (FreeChunk*) freeFinger;
-      assert(fc->isFree(), "A chunk on the free list should be free.");
+      assert(fc->is_free(), "A chunk on the free list should be free.");
       assert(fc->size() > 0, "Free range should have a size");
-      assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
+      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
     }
   }
 }
@@ -8057,7 +8057,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
       assert(addr < _limit, "sweep invariant");
       // check if we should yield
       do_yield_check(addr);
-      if (fc->isFree()) {
+      if (fc->is_free()) {
         // Chunk that is already free
         res = fc->size();
         do_already_free_chunk(fc);
@@ -8145,7 +8145,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
-    assert(_sp->verifyChunkInFreeLists(fc),
+    assert(_sp->verify_chunk_in_free_list(fc),
            "free chunk should be in free lists");
   }
   // a chunk that is already free, should not have been
@@ -8171,7 +8171,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
     FreeChunk* nextChunk = (FreeChunk*)(addr + size);
     assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
     if ((HeapWord*)nextChunk < _sp->end() &&  // There is another free chunk to the right ...
-        nextChunk->isFree()              &&   // ... which is free...
+        nextChunk->is_free()             &&   // ... which is free...
         nextChunk->cantCoalesce()) {          // ... but can't be coalesced
       // nothing to do
     } else {
@@ -8203,7 +8203,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
       assert(ffc->size() == pointer_delta(addr, freeFinger()),
             "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
-        assert(_sp->verifyChunkInFreeLists(ffc),
+        assert(_sp->verify_chunk_in_free_list(ffc),
               "free range is not in free lists");
       }
       _sp->removeFreeChunkFromFreeLists(ffc);
@@ -8262,7 +8262,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
      assert(ffc->size() == pointer_delta(addr, freeFinger()),
            "Size of free range is inconsistent with chunk size.");
     if (CMSTestInFreeList) {
-      assert(_sp->verifyChunkInFreeLists(ffc),
+      assert(_sp->verify_chunk_in_free_list(ffc),
             "free range is not in free lists");
     }
     _sp->removeFreeChunkFromFreeLists(ffc);
@@ -8351,11 +8351,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  const bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->is_free();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
+    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
   }
 
   if (CMSTraceSweeper) {
@@ -8410,7 +8410,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
     assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
            "Size of free range is inconsistent with chunk size.");
     if (CMSTestInFreeList) {
-      assert(_sp->verifyChunkInFreeLists(ffc),
+      assert(_sp->verify_chunk_in_free_list(ffc),
             "Chunk is not in free lists");
     }
     _sp->coalDeath(ffc->size());
@@ -8459,7 +8459,7 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                  _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
-    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {
       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
@@ -8482,8 +8482,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   if (!freeRangeInFreeLists()) {
     if (CMSTestInFreeList) {
       FreeChunk* fc = (FreeChunk*) chunk;
-      fc->setSize(size);
-      assert(!_sp->verifyChunkInFreeLists(fc),
+      fc->set_size(size);
+      assert(!_sp->verify_chunk_in_free_list(fc),
              "chunk should not be in free lists yet");
     }
     if (CMSTraceSweeper) {
@@ -8557,8 +8557,8 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
 // This is actually very useful in a product build if it can
 // be called from the debugger.  Compile it into the product
 // as needed.
-bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
-  return debug_cms_space->verifyChunkInFreeLists(fc);
+bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
+  return debug_cms_space->verify_chunk_in_free_list(fc);
 }
 #endif
@@ -9255,7 +9255,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
     size_t chunk_at_end_old_size = chunk_at_end->size();
     assert(chunk_at_end_old_size >= word_size_change,
            "Shrink is too large");
-    chunk_at_end->setSize(chunk_at_end_old_size -
+    chunk_at_end->set_size(chunk_at_end_old_size -
                           word_size_change);
     _cmsSpace->freed((HeapWord*)chunk_at_end->end(),
                      word_size_change);
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
 
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 #include "gc_implementation/shared/gSpaceCounters.hpp"
 #include "gc_implementation/shared/gcStats.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
+#include "memory/freeBlockDictionary.hpp"
 #include "memory/generation.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
@@ -1106,7 +1106,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                 int level, CardTableRS* ct,
                                 bool use_adaptive_freelists,
-                                FreeBlockDictionary::DictionaryChoice);
+                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);
 
   // Accessors
   CMSCollector* collector() const { return _collector; }
@@ -1328,7 +1328,7 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
   ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                   int level, CardTableRS* ct,
                                   bool use_adaptive_freelists,
-                                  FreeBlockDictionary::DictionaryChoice
+                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                     dictionaryChoice) :
     ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
       use_adaptive_freelists, dictionaryChoice) {}
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp
@@ -23,7 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#include "memory/freeBlockDictionary.hpp"
 #include "utilities/copy.hpp"
 
 #ifndef PRODUCT
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
@@ -75,20 +75,20 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
     // calls. We really want the read of _mark and _prev from this pointer
     // to be volatile but making the fields volatile causes all sorts of
     // compilation errors.
-    return ((volatile FreeChunk*)addr)->isFree();
+    return ((volatile FreeChunk*)addr)->is_free();
   }
 
-  bool isFree() const volatile {
+  bool is_free() const volatile {
     LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
     return (((intptr_t)_prev) & 0x1) == 0x1;
   }
   bool cantCoalesce() const {
-    assert(isFree(), "can't get coalesce bit on not free");
+    assert(is_free(), "can't get coalesce bit on not free");
     return (((intptr_t)_prev) & 0x2) == 0x2;
   }
   void dontCoalesce() {
     // the block should be free
-    assert(isFree(), "Should look like a free block");
+    assert(is_free(), "Should look like a free block");
     _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
   }
   FreeChunk* prev() const {
@@ -103,23 +103,23 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
     LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
     return _size;
   }
-  void setSize(size_t sz) {
+  void set_size(size_t sz) {
     LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
     _size = sz;
   }
 
   FreeChunk* next()   const { return _next; }
 
-  void linkAfter(FreeChunk* ptr) {
-    linkNext(ptr);
-    if (ptr != NULL) ptr->linkPrev(this);
+  void link_after(FreeChunk* ptr) {
+    link_next(ptr);
+    if (ptr != NULL) ptr->link_prev(this);
   }
-  void linkNext(FreeChunk* ptr) { _next = ptr; }
-  void linkPrev(FreeChunk* ptr) {
+  void link_next(FreeChunk* ptr) { _next = ptr; }
+  void link_prev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
-  void clearNext()              { _next = NULL; }
+  void clear_next()             { _next = NULL; }
   void markNotFree() {
     // Set _prev (klass) to null before (if) clearing the mark word below
    _prev = NULL;
@@ -129,7 +129,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
       set_mark(markOopDesc::prototype());
     }
 #endif
-    assert(!isFree(), "Error");
+    assert(!is_free(), "Error");
   }
 
   // Return the address past the end of this chunk
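The is_free()/cantCoalesce()/link_prev() bodies in this hunk encode free-list state in the two low bits of _prev, which works because a FreeChunk overlays dead, word-aligned heap memory. A free-standing sketch of that tagging scheme (hypothetical TaggedChunk type, assuming at least 4-byte alignment; the real FreeChunk additionally routes through the mark word when compressed oops are enabled):

#include <stdint.h>

struct TaggedChunk {
  TaggedChunk* _prev;  // doubles as the klass slot while the block is live

  // Bit 0 of _prev marks the block free; bit 1 marks it un-coalescable.
  bool is_free() const       { return ((intptr_t)_prev & 0x1) == 0x1; }
  bool cant_coalesce() const { return ((intptr_t)_prev & 0x2) == 0x2; }
  void link_prev(TaggedChunk* p) {
    _prev = (TaggedChunk*)((intptr_t)p | 0x1);  // store pointer + free bit
  }
  void dont_coalesce() {
    _prev = (TaggedChunk*)((intptr_t)_prev | 0x2);
  }
  TaggedChunk* prev() const {
    return (TaggedChunk*)((intptr_t)_prev & ~(intptr_t)0x3);  // strip tag bits
  }
};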
src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
@@ -121,7 +121,7 @@ void PromotionInfo::track(PromotedObject* trackOop) {
 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
   // make a copy of header as it may need to be spooled
   markOop mark = oop(trackOop)->mark();
-  trackOop->clearNext();
+  trackOop->clear_next();
   if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
     // save non-prototypical header, and mark oop
     saveDisplacedHeader(mark);
src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp
@@ -43,7 +43,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
   // whose position will depend on endian-ness of the platform.
   // This is so that there is no interference with the
   // cms_free_bit occupying bit position 7 (lsb == 0)
-  // when we are using compressed oops; see FreeChunk::isFree().
+  // when we are using compressed oops; see FreeChunk::is_free().
   // We cannot move the cms_free_bit down because currently
   // biased locking code assumes that age bits are contiguous
   // with the lock bits. Even if that assumption were relaxed,
@@ -65,7 +65,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
   };
  public:
   inline PromotedObject* next() const {
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
     PromotedObject* res;
     if (UseCompressedOops) {
       // The next pointer is a compressed oop stored in the top 32 bits
@@ -85,27 +85,27 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
     } else {
       _next |= (intptr_t)x;
     }
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
   }
   inline void setPromotedMark() {
     _next |= promoted_mask;
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
   }
   inline bool hasPromotedMark() const {
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
     return (_next & promoted_mask) == promoted_mask;
   }
   inline void setDisplacedMark() {
     _next |= displaced_mark;
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
   }
   inline bool hasDisplacedMark() const {
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
     return (_next & displaced_mark) != 0;
   }
-  inline void clearNext() {
+  inline void clear_next() {
     _next = 0;
-    assert(!((FreeChunk*)this)->isFree(), "Error");
+    assert(!((FreeChunk*)this)->is_free(), "Error");
   }
   debug_only(void *next_addr() { return (void *) &_next; })
 };
src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp
@@ -44,11 +44,11 @@
   nonstatic_field(FreeChunk,                _next,                 FreeChunk*)                       \
   nonstatic_field(FreeChunk,                _prev,                 FreeChunk*)                       \
   nonstatic_field(LinearAllocBlock,         _word_size,            size_t)                           \
-  nonstatic_field(FreeList,                 _size,                 size_t)                           \
-  nonstatic_field(FreeList,                 _count,                ssize_t)                          \
-  nonstatic_field(BinaryTreeDictionary,     _totalSize,            size_t)                           \
-  nonstatic_field(CompactibleFreeListSpace, _dictionary,           FreeBlockDictionary*)             \
-  nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0],   FreeList)                         \
+  nonstatic_field(FreeList<FreeChunk>,      _size,                 size_t)                           \
+  nonstatic_field(FreeList<FreeChunk>,      _count,                ssize_t)                          \
+  nonstatic_field(BinaryTreeDictionary<FreeChunk>, _total_size,    size_t)                           \
+  nonstatic_field(CompactibleFreeListSpace, _dictionary,           FreeBlockDictionary<FreeChunk>*)  \
+  nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0],   FreeList<FreeChunk>)              \
   nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
@@ -70,13 +70,13 @@
   declare_toplevel_type(CompactibleFreeListSpace*)                        \
   declare_toplevel_type(CMSCollector*)                                    \
   declare_toplevel_type(FreeChunk*)                                       \
-  declare_toplevel_type(BinaryTreeDictionary*)                            \
-  declare_toplevel_type(FreeBlockDictionary*)                             \
-  declare_toplevel_type(FreeList*)                                        \
-  declare_toplevel_type(FreeList)                                         \
+  declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*)                 \
+  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*)                  \
+  declare_toplevel_type(FreeList<FreeChunk>*)                             \
+  declare_toplevel_type(FreeList<FreeChunk>)                              \
   declare_toplevel_type(LinearAllocBlock)                                 \
-  declare_toplevel_type(FreeBlockDictionary)                              \
-  declare_type(BinaryTreeDictionary, FreeBlockDictionary)
+  declare_toplevel_type(FreeBlockDictionary<FreeChunk>)                   \
+  declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
 
 #define VM_INT_CONSTANTS_CMS(declare_constant)                            \
   declare_constant(Generation::ConcurrentMarkSweep)                       \
src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -1183,35 +1183,31 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   g1p->record_concurrent_mark_remark_end();
 }
 
-// Used to calculate the # live objects per region
-// for verification purposes
-class CalcLiveObjectsClosure: public HeapRegionClosure {
-
-  CMBitMapRO* _bm;
+// Base class of the closures that finalize and verify the
+// liveness counting data.
+class CMCountDataClosureBase: public HeapRegionClosure {
+protected:
   ConcurrentMark* _cm;
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  size_t _region_marked_bytes;
-
-  intptr_t _bottom_card_num;
-
-  void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
-    assert(start_card_num <= last_card_num, "sanity");
-    BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
-    BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
+  void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
+    assert(start_idx <= last_idx, "sanity");
 
-    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
-      _card_bm->par_at_put(i, 1);
+    // Set the inclusive bit range [start_idx, last_idx].
+    // For small ranges (up to 8 cards) use a simple loop; otherwise
+    // use par_at_put_range.
+    if ((last_idx - start_idx) < 8) {
+      for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+        _card_bm->par_set_bit(i);
+      }
+    } else {
+      assert(last_idx < _card_bm->size(), "sanity");
+      // Note BitMap::par_at_put_range() is exclusive.
+      _card_bm->par_at_put_range(start_idx, last_idx + 1, true);
     }
   }
 
- public:
-  CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
-                         BitMap* region_bm, BitMap* card_bm) :
-    _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-    _region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
-
   // It takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
   // bitmap to 1. If the region is "starts humongous" it will also set
@@ -1234,6 +1230,24 @@ public:
     }
   }
 
+ public:
+  CMCountDataClosureBase(ConcurrentMark *cm,
+                         BitMap* region_bm, BitMap* card_bm):
+    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
+};
+
+// Closure that calculates the # live objects per region. Used
+// for verification purposes during the cleanup pause.
+class CalcLiveObjectsClosure: public CMCountDataClosureBase {
+  CMBitMapRO* _bm;
+  size_t _region_marked_bytes;
+
+ public:
+  CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
+                         BitMap* region_bm, BitMap* card_bm) :
+    CMCountDataClosureBase(cm, region_bm, card_bm),
+    _bm(bm), _region_marked_bytes(0) { }
+
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->continuesHumongous()) {
@@ -1260,65 +1274,31 @@ public:
     size_t marked_bytes = 0;
 
-    // Below, the term "card num" means the result of shifting an address
-    // by the card shift -- address 0 corresponds to card number 0.  One
-    // must subtract the card num of the bottom of the heap to obtain a
-    // card table index.
-
-    // The first card num of the sequence of live cards currently being
-    // constructed.  -1 ==> no sequence.
-    intptr_t start_card_num = -1;
-
-    // The last card num of the sequence of live cards currently being
-    // constructed.  -1 ==> no sequence.
-    intptr_t last_card_num = -1;
-
     while (start < nextTop) {
       oop obj = oop(start);
       int obj_sz = obj->size();
-
-      // The card num of the start of the current object.
-      intptr_t obj_card_num =
-        intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
       HeapWord* obj_last = start + obj_sz - 1;
-      intptr_t obj_last_card_num =
-        intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
-
-      if (obj_card_num != last_card_num) {
-        if (start_card_num == -1) {
-          assert(last_card_num == -1, "Both or neither.");
-          start_card_num = obj_card_num;
-        } else {
-          assert(last_card_num != -1, "Both or neither.");
-          assert(obj_card_num >= last_card_num, "Inv");
-          if ((obj_card_num - last_card_num) > 1) {
-            // Mark the last run, and start a new one.
-            mark_card_num_range(start_card_num, last_card_num);
-            start_card_num = obj_card_num;
-          }
-        }
-      }
-      // In any case, we set the last card num.
-      last_card_num = obj_last_card_num;
+      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+      BitMap::idx_t last_idx = _cm->card_bitmap_index_for(obj_last);
+
+      // Set the bits in the card BM for this object (inclusive).
+      set_card_bitmap_range(start_idx, last_idx);
 
       // Add the size of this object to the number of marked bytes.
       marked_bytes += (size_t)obj_sz * HeapWordSize;
 
       // Find the next marked object after this one.
-      start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
-    }
-
-    // Handle the last range, if any.
-    if (start_card_num != -1) {
-      mark_card_num_range(start_card_num, last_card_num);
+      start = _bm->getNextMarkedWordAddress(obj_last + 1, nextTop);
     }
 
     // Mark the allocated-since-marking portion...
     HeapWord* top = hr->top();
     if (nextTop < top) {
-      start_card_num = intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
-      last_card_num = intptr_t(uintptr_t(top) >> CardTableModRefBS::card_shift);
+      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(nextTop);
+      BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top - 1);
 
-      mark_card_num_range(start_card_num, last_card_num);
+      set_card_bitmap_range(start_idx, last_idx);
 
       // This definitely means the region has live objects.
       set_bit_for_region(hr);
@@ -1394,17 +1374,6 @@ public:
     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                     Mutex::_no_safepoint_check_flag);
 
-    // Verify that _top_at_conc_count == ntams
-    if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
-      if (_verbose) {
-        gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
-                               "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
-                               hr->hrs_index(), hr->next_top_at_mark_start(),
-                               hr->top_at_conc_mark_count());
-      }
-      failures += 1;
-    }
-
     // Verify the marked bytes for this region.
     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
     size_t act_marked_bytes = hr->next_marked_bytes();
@@ -1470,7 +1439,7 @@ public:
     _failures += failures;
 
     // We could stop iteration over the heap when we
-    // find the first voilating region by returning true.
+    // find the first violating region by returning true.
     return false;
   }
 };
@@ -1543,62 +1512,19 @@ public:
   int failures() const { return _failures; }
 };
 
-// Final update of count data (during cleanup).
-// Adds [top_at_count, NTAMS) to the marked bytes for each
-// region. Sets the bits in the card bitmap corresponding
-// to the interval [top_at_count, top], and sets the
-// liveness bit for each region containing live data
-// in the region bitmap.
-class FinalCountDataUpdateClosure: public HeapRegionClosure {
-  ConcurrentMark* _cm;
-  BitMap* _region_bm;
-  BitMap* _card_bm;
-
-  void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
-    assert(start_idx <= last_idx, "sanity");
-
-    // Set the inclusive bit range [start_idx, last_idx].
-    // For small ranges (up to 8 cards) use a simple loop; otherwise
-    // use par_at_put_range.
-    if ((last_idx - start_idx) <= 8) {
-      for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
-        _card_bm->par_set_bit(i);
-      }
-    } else {
-      assert(last_idx < _card_bm->size(), "sanity");
-      // Note BitMap::par_at_put_range() is exclusive.
-      _card_bm->par_at_put_range(start_idx, last_idx + 1, true);
-    }
-  }
-
-  // It takes a region that's not empty (i.e., it has at least one
-  // live object in it and sets its corresponding bit on the region
-  // bitmap to 1. If the region is "starts humongous" it will also set
-  // to 1 the bits on the region bitmap that correspond to its
-  // associated "continues humongous" regions.
-  void set_bit_for_region(HeapRegion* hr) {
-    assert(!hr->continuesHumongous(), "should have filtered those out");
-
-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
-    if (!hr->startsHumongous()) {
-      // Normal (non-humongous) case: just set the bit.
-      _region_bm->par_set_bit(index);
-    } else {
-      // Starts humongous case: calculate how many regions are part of
-      // this humongous region and then set the bit range.
-      G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      HeapRegion* last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
-      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
-      _region_bm->par_at_put_range(index, end_index, true);
-    }
-  }
-
+// Closure that finalizes the liveness counting data.
+// Used during the cleanup pause.
+// Sets the bits corresponding to the interval [NTAMS, top]
+// (which contains the implicitly live objects) in the
+// card liveness bitmap. Also sets the bit for each region,
+// containing live data, in the region liveness bitmap.
+class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
  public:
   FinalCountDataUpdateClosure(ConcurrentMark* cm,
                               BitMap* region_bm,
                               BitMap* card_bm) :
-    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
+    CMCountDataClosureBase(cm, region_bm, card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
@@ -1613,26 +1539,10 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
       return false;
     }
 
-    HeapWord* start = hr->top_at_conc_mark_count();
     HeapWord* ntams = hr->next_top_at_mark_start();
     HeapWord* top   = hr->top();
 
-    assert(hr->bottom() <= start && start <= hr->end() &&
-           hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
-
-    if (start < ntams) {
-      // Region was changed between remark and cleanup pauses
-      // We need to add (ntams - start) to the marked bytes
-      // for this region, and set bits for the range
-      // [ card_idx(start), card_idx(ntams) ) in the card bitmap.
-      size_t live_bytes = (ntams - start) * HeapWordSize;
-      hr->add_to_marked_bytes(live_bytes);
-
-      // Record the new top at conc count
-      hr->set_top_at_conc_mark_count(ntams);
-
-      // The setting of the bits in the card bitmap takes place below
-    }
+    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
 
     // Mark the allocated-since-marking portion...
     if (ntams < top) {
@@ -1640,8 +1550,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
       set_bit_for_region(hr);
     }
 
-    // Now set the bits for [start, top]
-    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+    // Now set the bits for [ntams, top]
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
     BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
     set_card_bitmap_range(start_idx, last_idx);
@@ -3072,9 +2982,6 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
     // Update the marked bytes for this region.
     hr->add_to_marked_bytes(marked_bytes);
 
-    // Now set the top at count to NTAMS.
-    hr->set_top_at_conc_mark_count(limit);
-
     // Next heap region
     return false;
   }
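The set_card_bitmap_range() helper that this file hoists into CMCountDataClosureBase picks between a per-bit loop and the word-granular range primitive depending on run length. A self-contained sketch of that heuristic (CardBitmap is a hypothetical, non-atomic stand-in for HotSpot's BitMap, whose par_* operations are atomic):

#include <cstddef>
#include <vector>

struct CardBitmap {
  typedef size_t idx_t;
  std::vector<bool> bits;
  explicit CardBitmap(size_t n) : bits(n, false) {}
  void par_set_bit(idx_t i) { bits[i] = true; }
  void par_at_put_range(idx_t beg, idx_t end, bool v) {  // [beg, end)
    for (idx_t i = beg; i < end; ++i) bits[i] = v;
  }
};

// Set the inclusive bit range [start_idx, last_idx].
void set_card_bitmap_range(CardBitmap* card_bm,
                           CardBitmap::idx_t start_idx,
                           CardBitmap::idx_t last_idx) {
  if (last_idx - start_idx < 8) {
    // Short runs: setting bits one at a time avoids range-call overhead.
    for (CardBitmap::idx_t i = start_idx; i <= last_idx; i += 1) {
      card_bm->par_set_bit(i);
    }
  } else {
    // par_at_put_range() is exclusive of its end index, hence last_idx + 1.
    card_bm->par_at_put_range(start_idx, last_idx + 1, true);
  }
}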
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -368,16 +368,11 @@ void YoungList::print() {
   if (curr == NULL)
     gclog_or_tty->print_cr("  empty");
   while (curr != NULL) {
-    gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                           "age: %4d, y: %d, surv: %d",
-                           curr->bottom(), curr->end(),
-                           curr->top(),
+    gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
+                           HR_FORMAT_PARAMS(curr),
                            curr->prev_top_at_mark_start(),
                            curr->next_top_at_mark_start(),
-                           curr->top_at_conc_mark_count(),
-                           curr->age_in_surv_rate_group_cond(),
-                           curr->is_young(),
-                           curr->is_survivor());
+                           curr->age_in_surv_rate_group_cond());
     curr = curr->get_next_young_region();
   }
 }
@@ -1253,12 +1248,13 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     IsGCActiveMark x;
 
     // Timing
-    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
-    assert(!system_gc || explicit_gc, "invariant");
+    assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
-                G1Log::fine(), true, gclog_or_tty);
+    char verbose_str[128];
+    sprintf(verbose_str, "Full GC (%s)", GCCause::to_string(gc_cause()));
+    TraceTime t(verbose_str, G1Log::fine(), true, gclog_or_tty);
 
     TraceCollectorStats tcs(g1mm()->full_collection_counters());
     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -3593,25 +3589,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   // Inner scope for scope based logging, timers, and stats collection
   {
-    char verbose_str[128];
-    sprintf(verbose_str, "GC pause ");
-    if (g1_policy()->gcs_are_young()) {
-      strcat(verbose_str, "(young)");
-    } else {
-      strcat(verbose_str, "(mixed)");
-    }
     if (g1_policy()->during_initial_mark_pause()) {
-      strcat(verbose_str, " (initial-mark)");
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_total_full_collections();
     }
-
     // if the log level is "finer" is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+    char verbose_str[128];
+    sprintf(verbose_str, "GC pause (%s) (%s)%s",
+            GCCause::to_string(gc_cause()),
+            g1_policy()->gcs_are_young() ? "young" : "mixed",
+            g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
     TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
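The pause-label rework above replaces incremental strcat() construction with a single format string that also embeds the GC cause. A standalone illustration with made-up inputs (sketch only; in the real code the buffer feeds TraceTime's label):

#include <cstdio>

int main() {
  const char* cause        = "G1 Evacuation Pause";  // GCCause::to_string(...)
  bool gcs_are_young       = true;                   // g1_policy()->gcs_are_young()
  bool during_initial_mark = false;                  // ...->during_initial_mark_pause()

  char verbose_str[128];
  snprintf(verbose_str, sizeof(verbose_str), "GC pause (%s) (%s)%s",
           cause,
           gcs_are_young ? "young" : "mixed",
           during_initial_mark ? " (initial-mark)" : "");
  puts(verbose_str);  // prints: GC pause (G1 Evacuation Pause) (young)
  return 0;
}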
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -886,8 +886,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                       size_t start_used) {
   if (G1Log::finer()) {
     gclog_or_tty->stamp(PrintGCTimeStamps);
-    gclog_or_tty->print("[GC pause");
-    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
+    gclog_or_tty->print("[GC pause (%s) (%s)",
+                        GCCause::to_string(_g1->gc_cause()),
+                        gcs_are_young() ? "young" : "mixed");
   }
 
   // We only need to do this here as the policy will only be applied
@@ -2459,16 +2460,10 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
   while (csr != NULL) {
     HeapRegion* next = csr->next_in_collection_set();
     assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                 "age: %4d, y: %d, surv: %d",
-                 csr->bottom(), csr->end(),
-                 csr->top(),
-                 csr->prev_top_at_mark_start(),
-                 csr->next_top_at_mark_start(),
-                 csr->top_at_conc_mark_count(),
-                 csr->age_in_surv_rate_group_cond(),
-                 csr->is_young(),
-                 csr->is_survivor());
+    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
+                 HR_FORMAT_PARAMS(csr),
+                 csr->prev_top_at_mark_start(),
+                 csr->next_top_at_mark_start(),
+                 csr->age_in_surv_rate_group_cond());
     csr = next;
   }
 }
src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -510,9 +510,6 @@ HeapRegion::HeapRegion(uint hrs_index,
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
-  // In case the region is allocated during a pause, note the top.
-  // We haven't done any counting on a brand new region.
-  _top_at_conc_mark_count = bottom();
 }
 
 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
@@ -585,14 +582,12 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
     // we find to be self-forwarded on the next bitmap. So all
     // objects need to be below NTAMS.
     _next_top_at_mark_start = top();
-    set_top_at_conc_mark_count(bottom());
     _next_marked_bytes = 0;
   } else if (during_conc_mark) {
     // During concurrent mark, all objects in the CSet (including
     // the ones we find to be self-forwarded) are implicitly live.
     // So all objects need to be above NTAMS.
     _next_top_at_mark_start = bottom();
-    set_top_at_conc_mark_count(bottom());
     _next_marked_bytes = 0;
   }
 }
src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -306,9 +306,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If a collection pause is in progress, this is the top at the start
   // of that pause.
 
-  // We've counted the marked bytes of objects below here.
-  HeapWord* _top_at_conc_mark_count;
-
   void init_top_at_mark_start() {
     assert(_prev_marked_bytes == 0 &&
            _next_marked_bytes == 0,
@@ -316,7 +313,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
     HeapWord* bot = bottom();
     _prev_top_at_mark_start = bot;
     _next_top_at_mark_start = bot;
-    _top_at_conc_mark_count = bot;
   }
 
   void set_young_type(YoungType new_type) {
@@ -625,19 +621,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // last mark phase ended.
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
-  void init_top_at_conc_mark_count() {
-    _top_at_conc_mark_count = bottom();
-  }
-
-  void set_top_at_conc_mark_count(HeapWord* cur) {
-    assert(bottom() <= cur && cur <= end(), "Sanity.");
-    _top_at_conc_mark_count = cur;
-  }
-
-  HeapWord* top_at_conc_mark_count() {
-    return _top_at_conc_mark_count;
-  }
-
   void reset_during_compaction() {
     guarantee(isHumongous() && startsHumongous(),
               "should only be called for humongous regions");
@@ -733,7 +716,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
     _evacuation_failed = b;
 
     if (b) {
-      init_top_at_conc_mark_count();
       _next_marked_bytes = 0;
     }
   }
src/share/vm/gc_implementation/g1/heapRegion.inline.hpp
@@ -56,7 +56,6 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
 }
 
 inline void HeapRegion::note_start_of_marking() {
-  init_top_at_conc_mark_count();
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
 }
src/share/vm/gc_implementation/shared/allocationStats.hpp
@@ -39,7 +39,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // We measure the demand between the end of the previous sweep and
   // beginning of this sweep:
   //   Count(end_last_sweep) - Count(start_this_sweep)
-  //     + splitBirths(between) - splitDeaths(between)
+  //     + split_births(between) - split_deaths(between)
   // The above number divided by the time since the end of the
   // previous sweep gives us a time rate of demand for blocks
   // of this size. We compute a padded average of this rate as
@@ -51,34 +51,34 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   AdaptivePaddedAverage _demand_rate_estimate;
 
   ssize_t     _desired;         // Demand stimate computed as described above
-  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
+  ssize_t     _coal_desired;    // desired +/- small-percent for tuning coalescing
 
   ssize_t     _surplus;         // count - (desired +/- small-percent),
                                 // used to tune splitting in best fit
-  ssize_t     _bfrSurp;         // surplus at start of current sweep
-  ssize_t     _prevSweep;       // count from end of previous sweep
-  ssize_t     _beforeSweep;     // count from before current sweep
-  ssize_t     _coalBirths;      // additional chunks from coalescing
-  ssize_t     _coalDeaths;      // loss from coalescing
-  ssize_t     _splitBirths;     // additional chunks from splitting
-  ssize_t     _splitDeaths;     // loss from splitting
-  size_t      _returnedBytes;   // number of bytes returned to list.
+  ssize_t     _bfr_surp;        // surplus at start of current sweep
+  ssize_t     _prev_sweep;      // count from end of previous sweep
+  ssize_t     _before_sweep;    // count from before current sweep
+  ssize_t     _coal_births;     // additional chunks from coalescing
+  ssize_t     _coal_deaths;     // loss from coalescing
+  ssize_t     _split_births;    // additional chunks from splitting
+  ssize_t     _split_deaths;    // loss from splitting
+  size_t      _returned_bytes;  // number of bytes returned to list.
  public:
   void initialize(bool split_birth = false) {
     AdaptivePaddedAverage* dummy =
       new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                          CMS_FLSPadding);
     _desired = 0;
-    _coalDesired = 0;
+    _coal_desired = 0;
     _surplus = 0;
-    _bfrSurp = 0;
-    _prevSweep = 0;
-    _beforeSweep = 0;
-    _coalBirths = 0;
-    _coalDeaths = 0;
-    _splitBirths = (split_birth ? 1 : 0);
-    _splitDeaths = 0;
-    _returnedBytes = 0;
+    _bfr_surp = 0;
+    _prev_sweep = 0;
+    _before_sweep = 0;
+    _coal_births = 0;
+    _coal_deaths = 0;
+    _split_births = (split_birth ? 1 : 0);
+    _split_deaths = 0;
+    _returned_bytes = 0;
   }
 
   AllocationStats() {
@@ -99,12 +99,12 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
     // vulnerable to noisy glitches. In such cases, we
     // ignore the current sample and use currently available
     // historical estimates.
-    assert(prevSweep() + splitBirths() + coalBirths()        // "Total Production Stock"
-           >= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
+    assert(prev_sweep() + split_births() + coal_births()        // "Total Production Stock"
+           >= split_deaths() + coal_deaths() + (ssize_t)count,  // "Current stock + depletion"
            "Conservation Principle");
     if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
-                       - splitDeaths() - coalDeaths();
+      ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
+                       - split_deaths() - coal_deaths();
       assert(demand >= 0,
              err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
                      PTR_FORMAT " (size=" SIZE_FORMAT ")",
@@ -130,40 +130,40 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   ssize_t desired() const { return _desired; }
   void set_desired(ssize_t v) { _desired = v; }
 
-  ssize_t coalDesired() const { return _coalDesired; }
-  void set_coalDesired(ssize_t v) { _coalDesired = v; }
+  ssize_t coal_desired() const { return _coal_desired; }
+  void set_coal_desired(ssize_t v) { _coal_desired = v; }
 
   ssize_t surplus() const { return _surplus; }
   void set_surplus(ssize_t v) { _surplus = v; }
   void increment_surplus() { _surplus++; }
   void decrement_surplus() { _surplus--; }
 
-  ssize_t bfrSurp() const { return _bfrSurp; }
-  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
-  ssize_t prevSweep() const { return _prevSweep; }
-  void set_prevSweep(ssize_t v) { _prevSweep = v; }
-  ssize_t beforeSweep() const { return _beforeSweep; }
-  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
+  ssize_t bfr_surp() const { return _bfr_surp; }
+  void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
+  ssize_t prev_sweep() const { return _prev_sweep; }
+  void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
+  ssize_t before_sweep() const { return _before_sweep; }
+  void set_before_sweep(ssize_t v) { _before_sweep = v; }
 
-  ssize_t coalBirths() const { return _coalBirths; }
-  void set_coalBirths(ssize_t v) { _coalBirths = v; }
-  void increment_coalBirths() { _coalBirths++; }
+  ssize_t coal_births() const { return _coal_births; }
+  void set_coal_births(ssize_t v) { _coal_births = v; }
+  void increment_coal_births() { _coal_births++; }
 
-  ssize_t coalDeaths() const { return _coalDeaths; }
-  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
-  void increment_coalDeaths() { _coalDeaths++; }
+  ssize_t coal_deaths() const { return _coal_deaths; }
+  void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
+  void increment_coal_deaths() { _coal_deaths++; }
 
-  ssize_t splitBirths() const { return _splitBirths; }
-  void set_splitBirths(ssize_t v) { _splitBirths = v; }
-  void increment_splitBirths() { _splitBirths++; }
+  ssize_t split_births() const { return _split_births; }
+  void set_split_births(ssize_t v) { _split_births = v; }
+  void increment_split_births() { _split_births++; }
 
-  ssize_t splitDeaths() const { return _splitDeaths; }
-  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
-  void increment_splitDeaths() { _splitDeaths++; }
+  ssize_t split_deaths() const { return _split_deaths; }
+  void set_split_deaths(ssize_t v) { _split_deaths = v; }
+  void increment_split_deaths() { _split_deaths++; }
 
   NOT_PRODUCT(
-    size_t returnedBytes() const { return _returnedBytes; }
-    void set_returnedBytes(size_t v) { _returnedBytes = v; }
+    size_t returned_bytes() const { return _returned_bytes; }
+    void set_returned_bytes(size_t v) { _returned_bytes = v; }
   )
 };
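The conservation assert and the demand computation in the compute_desired() hunk above can be sanity-checked with concrete numbers. A free-standing worked example (made-up values):

#include <cassert>
#include <cstdio>

int main() {
  long prev_sweep   = 100;  // blocks on the list at the end of the previous sweep
  long split_births = 30;   // chunks of this size produced by splitting since then
  long coal_births  = 10;   // chunks produced by coalescing
  long split_deaths = 5;    // chunks consumed by splitting
  long coal_deaths  = 15;   // chunks consumed by coalescing
  long count        = 80;   // blocks currently on the list

  // Conservation Principle: total production stock must cover
  // the current stock plus depletion.
  assert(prev_sweep + split_births + coal_births >=
         split_deaths + coal_deaths + count);

  // Net demand for blocks of this size since the previous sweep.
  long demand = prev_sweep - count + split_births + coal_births
              - split_deaths - coal_deaths;
  printf("demand = %ld\n", demand);  // prints: demand = 40
  return 0;
}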
src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp → src/share/vm/memory/binaryTreeDictionary.cpp
(diff collapsed in the source view; +442 -356)
src/share/vm/
gc_implementation/concurrentMarkSweep
/binaryTreeDictionary.hpp
→
src/share/vm/
memory
/binaryTreeDictionary.hpp
浏览文件 @
7b1f4848
/*
* Copyright (c) 2001, 201
0
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 201
2
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -22,87 +22,101 @@
  *
  */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
+#ifndef SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
+#define SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP

-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/freeList.hpp"

 /*
  * A binary tree based search structure for free blocks.
- * This is currently used in the Concurrent Mark&Sweep implementation.
+ * This is currently used in the Concurrent Mark&Sweep implementation, but
+ * will be used for free block management for metadata.
  */

 // A TreeList is a FreeList which can be used to maintain a
 // binary tree of free lists.

-class TreeChunk;
-class BinaryTreeDictionary;
-class AscendTreeCensusClosure;
-class DescendTreeCensusClosure;
-class DescendTreeSearchClosure;
+template <class Chunk> class TreeChunk;
+template <class Chunk> class BinaryTreeDictionary;
+template <class Chunk> class AscendTreeCensusClosure;
+template <class Chunk> class DescendTreeCensusClosure;
+template <class Chunk> class DescendTreeSearchClosure;

-class TreeList: public FreeList {
-  friend class TreeChunk;
-  friend class BinaryTreeDictionary;
-  friend class AscendTreeCensusClosure;
-  friend class DescendTreeCensusClosure;
-  friend class DescendTreeSearchClosure;
+template <class Chunk>
+class TreeList: public FreeList<Chunk> {
+  friend class TreeChunk<Chunk>;
+  friend class BinaryTreeDictionary<Chunk>;
+  friend class AscendTreeCensusClosure<Chunk>;
+  friend class DescendTreeCensusClosure<Chunk>;
+  friend class DescendTreeSearchClosure<Chunk>;

+  TreeList<Chunk>* _parent;
+  TreeList<Chunk>* _left;
+  TreeList<Chunk>* _right;

 protected:
-  TreeList* parent() const { return _parent; }
-  TreeList* left()   const { return _left;   }
-  TreeList* right()  const { return _right;  }
+  TreeList<Chunk>* parent() const { return _parent; }
+  TreeList<Chunk>* left()   const { return _left;   }
+  TreeList<Chunk>* right()  const { return _right;  }

+  // Wrapper on call to base class, to get the template to compile.
+  Chunk* head() const { return FreeList<Chunk>::head(); }
+  Chunk* tail() const { return FreeList<Chunk>::tail(); }
+  void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
+  void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
+
+  size_t size() const { return FreeList<Chunk>::size(); }

   // Accessors for links in tree.
-  void setLeft(TreeList* tl) {
+  void set_left(TreeList<Chunk>* tl) {
     _left = tl;
     if (tl != NULL)
-      tl->setParent(this);
+      tl->set_parent(this);
   }
-  void setRight(TreeList* tl) {
+  void set_right(TreeList<Chunk>* tl) {
     _right = tl;
     if (tl != NULL)
-      tl->setParent(this);
+      tl->set_parent(this);
   }
-  void setParent(TreeList* tl)  { _parent = tl;   }
+  void set_parent(TreeList<Chunk>* tl)  { _parent = tl;   }

   void clearLeft()               { _left = NULL;   }
-  void clearRight()              { _right = NULL;  }
-  void clearParent()             { _parent = NULL; }
-  void initialize()              { clearLeft(); clearRight(), clearParent(); }
+  void clear_right()             { _right = NULL;  }
+  void clear_parent()            { _parent = NULL; }
+  void initialize()              { clearLeft(); clear_right(), clear_parent(); }

   // For constructing a TreeList from a Tree chunk or
   // address and size.
-  static TreeList* as_TreeList(TreeChunk* tc);
-  static TreeList* as_TreeList(HeapWord* addr, size_t size);
+  static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
+  static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);

   // Returns the head of the free list as a pointer to a TreeChunk.
-  TreeChunk* head_as_TreeChunk();
+  TreeChunk<Chunk>* head_as_TreeChunk();

   // Returns the first available chunk in the free list as a pointer
   // to a TreeChunk.
-  TreeChunk* first_available();
+  TreeChunk<Chunk>* first_available();

   // Returns the block with the largest heap address amongst
   // those in the list for this size; potentially slow and expensive,
   // use with caution!
-  TreeChunk* largest_address();
+  TreeChunk<Chunk>* largest_address();

-  // removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
+  // remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
   // If "tc" is the first chunk in the list, it is also the
-  // TreeList that is the node in the tree.  removeChunkReplaceIfNeeded()
+  // TreeList that is the node in the tree.  remove_chunk_replace_if_needed()
   // returns the possibly replaced TreeList* for the node in
   // the tree.  It also updates the parent of the original
   // node to point to the new node.
-  TreeList* removeChunkReplaceIfNeeded(TreeChunk* tc);
+  TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
   // See FreeList.
-  void returnChunkAtHead(TreeChunk* tc);
-  void returnChunkAtTail(TreeChunk* tc);
+  void return_chunk_at_head(TreeChunk<Chunk>* tc);
+  void return_chunk_at_tail(TreeChunk<Chunk>* tc);
 };
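The "Wrapper on call to base class, to get the template to compile" comment above marks a real C++ rule: once TreeList is a template, members of its dependent base FreeList<Chunk> are invisible to unqualified name lookup, so the calls must be qualified. A minimal sketch with toy types (not the HotSpot classes):

```cpp
#include <cstddef>

template <class Chunk>
class FreeListSketch {
  Chunk* _head;
 public:
  FreeListSketch() : _head(nullptr) {}
  Chunk* head() const { return _head; }
};

template <class Chunk>
class TreeListSketch : public FreeListSketch<Chunk> {
 public:
  // An unqualified "return head();" would fail here: names in a dependent
  // base class are not found by ordinary lookup inside a template, which
  // is exactly why the patch adds wrappers such as
  //   Chunk* head() const { return FreeList<Chunk>::head(); }
  Chunk* head() const { return FreeListSketch<Chunk>::head(); }
};

struct ToyChunk {};  // hypothetical stand-in for HotSpot's FreeChunk

int main() {
  TreeListSketch<ToyChunk> tl;
  return tl.head() == nullptr ? 0 : 1;  // 0: list starts out empty
}
```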
-// A TreeChunk is a subclass of a FreeChunk that additionally
+// A TreeChunk is a subclass of a Chunk that additionally
 // maintains a pointer to the free list on which it is currently
 // linked.
 // A TreeChunk is also used as a node in the binary tree.  This
...
@@ -115,92 +129,111 @@ class TreeList: public FreeList {
 // on the free list for a node in the tree and is only removed if
 // it is the last chunk on the free list.
-class TreeChunk : public FreeChunk {
-  friend class TreeList;
-  TreeList* _list;
-  TreeList _embedded_list;  // if non-null, this chunk is on _list
+template <class Chunk>
+class TreeChunk : public Chunk {
+  friend class TreeList<Chunk>;
+  TreeList<Chunk>* _list;
+  TreeList<Chunk> _embedded_list;  // if non-null, this chunk is on _list
 protected:
-  TreeList* embedded_list() const { return (TreeList*) &_embedded_list; }
-  void set_embedded_list(TreeList* v) { _embedded_list = *v; }
+  TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
+  void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
 public:
-  TreeList* list() { return _list; }
-  void set_list(TreeList* v) { _list = v; }
-  static TreeChunk* as_TreeChunk(FreeChunk* fc);
+  TreeList<Chunk>* list() { return _list; }
+  void set_list(TreeList<Chunk>* v) { _list = v; }
+  static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
   // Initialize fields in a TreeChunk that should be
   // initialized when the TreeChunk is being added to
   // a free list in the tree.
   void initialize() { embedded_list()->initialize(); }

+  Chunk* next() const { return Chunk::next(); }
+  Chunk* prev() const { return Chunk::prev(); }
+  size_t size() const volatile { return Chunk::size(); }
+
   // debugging
-  void verifyTreeChunkList() const;
+  void verify_tree_chunk_list() const;
 };
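The _embedded_list member above is the structural trick of this dictionary: the first free chunk of a given size carries the TreeList header inside itself, so that chunk doubles as the tree node and no separate node allocation is needed. A toy illustration of the embedding (hypothetical names, not the HotSpot types):

```cpp
#include <cstdio>

struct ListHeader {        // stands in for TreeList<Chunk>
  int size;
  int count;
};

struct TreeChunkish {      // stands in for TreeChunk<Chunk>
  ListHeader embedded;     // the list header lives inside the chunk itself
};

int main() {
  TreeChunkish first{{64, 1}};
  // The dictionary treats the embedded header as the tree node for all
  // free chunks of this size:
  ListHeader* node = &first.embedded;
  std::printf("size=%d count=%d\n", node->size, node->count);
  return 0;
}
```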
-const size_t MIN_TREE_CHUNK_SIZE = sizeof(TreeChunk)/HeapWordSize;

-class BinaryTreeDictionary: public FreeBlockDictionary {
+template <class Chunk>
+class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   friend class VMStructs;
   bool       _splay;
-  size_t     _totalSize;
-  size_t     _totalFreeBlocks;
-  TreeList*  _root;
+  size_t     _total_size;
+  size_t     _total_free_blocks;
+  TreeList<Chunk>* _root;
+  bool       _adaptive_freelists;

   // private accessors
   bool splay() const { return _splay; }
   void set_splay(bool v) { _splay = v; }
-  size_t totalSize() const { return _totalSize; }
-  void set_totalSize(size_t v) { _totalSize = v; }
-  virtual void inc_totalSize(size_t v);
-  virtual void dec_totalSize(size_t v);
-  size_t totalFreeBlocks() const { return _totalFreeBlocks; }
-  void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
-  TreeList* root() const { return _root; }
-  void set_root(TreeList* v) { _root = v; }
+  void set_total_size(size_t v) { _total_size = v; }
+  virtual void inc_total_size(size_t v);
+  virtual void dec_total_size(size_t v);
+  size_t total_free_blocks() const { return _total_free_blocks; }
+  void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
+  TreeList<Chunk>* root() const { return _root; }
+  void set_root(TreeList<Chunk>* v) { _root = v; }
+  bool adaptive_freelists() { return _adaptive_freelists; }

   // This field is added and can be set to point to the
   // the Mutex used to synchronize access to the
   // dictionary so that assertion checking can be done.
   // For example it is set to point to _parDictionaryAllocLock.
   NOT_PRODUCT(Mutex* _lock;)

   // Remove a chunk of size "size" or larger from the tree and
   // return it.  If the chunk
   // is the last chunk of that size, remove the node for that size
   // from the tree.
-  TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
+  TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
   // Return a list of the specified size or NULL from the tree.
   // The list is not removed from the tree.
-  TreeList* findList (size_t size) const;
+  TreeList<Chunk>* find_list (size_t size) const;
   // Remove this chunk from the tree.  If the removal results
   // in an empty list in the tree, remove the empty list.
-  TreeChunk* removeChunkFromTree(TreeChunk* tc);
+  TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
   // Remove the node in the trees starting at tl that has the
   // minimum value and return it.  Repair the tree as needed.
-  TreeList* removeTreeMinimum(TreeList* tl);
-  void      semiSplayStep(TreeList* tl);
+  TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
+  void      semi_splay_step(TreeList<Chunk>* tl);
   // Add this free chunk to the tree.
-  void      insertChunkInTree(FreeChunk* freeChunk);
+  void      insert_chunk_in_tree(Chunk* freeChunk);
 public:
-  void      verifyTree() const;
+
+  static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
+
+  void      verify_tree() const;
   // verify that the given chunk is in the tree.
-  bool      verifyChunkInFreeLists(FreeChunk* tc) const;
+  bool      verify_chunk_in_free_list(Chunk* tc) const;
 private:
-  void          verifyTreeHelper(TreeList* tl) const;
-  static size_t verifyPrevFreePtrs(TreeList* tl);
+  void          verify_tree_helper(TreeList<Chunk>* tl) const;
+  static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);

   // Returns the total number of chunks in the list.
-  size_t     totalListLength(TreeList* tl) const;
+  size_t     total_list_length(TreeList<Chunk>* tl) const;
   // Returns the total number of words in the chunks in the tree
   // starting at "tl".
-  size_t     totalSizeInTree(TreeList* tl) const;
+  size_t     total_size_in_tree(TreeList<Chunk>* tl) const;
   // Returns the sum of the square of the size of each block
   // in the tree starting at "tl".
-  double     sum_of_squared_block_sizes(TreeList* const tl) const;
+  double     sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
   // Returns the total number of free blocks in the tree starting
   // at "tl".
-  size_t     totalFreeBlocksInTree(TreeList* tl) const;
-  size_t     numFreeBlocks() const;
+  size_t     total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
+  size_t     num_free_blocks() const;
   size_t     treeHeight() const;
-  size_t     treeHeightHelper(TreeList* tl) const;
-  size_t     totalNodesInTree(TreeList* tl) const;
-  size_t     totalNodesHelper(TreeList* tl) const;
+  size_t     tree_height_helper(TreeList<Chunk>* tl) const;
+  size_t     total_nodes_in_tree(TreeList<Chunk>* tl) const;
+  size_t     total_nodes_helper(TreeList<Chunk>* tl) const;

 public:
   // Constructor
-  BinaryTreeDictionary(MemRegion mr, bool splay = false);
+  BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
+  BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);

   // Public accessors
+  size_t total_size() const { return _total_size; }

   // Reset the dictionary to the initial conditions with
   // a single free chunk.
...
@@ -212,85 +245,85 @@ class BinaryTreeDictionary: public FreeBlockDictionary {
   // Return a chunk of size "size" or greater from
   // the tree.
   // want a better dynamic splay strategy for the future.
-  FreeChunk* getChunk(size_t size, Dither dither) {
-    verify_par_locked();
-    FreeChunk* res = getChunkFromTree(size, dither, splay());
-    assert(res == NULL || res->isFree(),
+  Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    Chunk* res = get_chunk_from_tree(size, dither, splay());
+    assert(res == NULL || res->is_free(),
            "Should be returning a free chunk");
     return res;
   }

-  void returnChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    insertChunkInTree(chunk);
+  void return_chunk(Chunk* chunk) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    insert_chunk_in_tree(chunk);
   }

-  void removeChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    removeChunkFromTree((TreeChunk*)chunk);
-    assert(chunk->isFree(), "Should still be a free chunk");
+  void remove_chunk(Chunk* chunk) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
+    assert(chunk->is_free(), "Should still be a free chunk");
   }

-  size_t maxChunkSize() const;
-  size_t totalChunkSize(debug_only(const Mutex* lock)) const {
+  size_t max_chunk_size() const;
+  size_t total_chunk_size(debug_only(const Mutex* lock)) const {
     debug_only(
       if (lock != NULL && lock->owned_by_self()) {
-        assert(totalSizeInTree(root()) == totalSize(),
-               "_totalSize inconsistency");
+        assert(total_size_in_tree(root()) == total_size(),
+               "_total_size inconsistency");
       }
     )
-    return totalSize();
+    return total_size();
   }

-  size_t minSize() const {
-    return MIN_TREE_CHUNK_SIZE;
+  size_t min_size() const {
+    return min_tree_chunk_size;
   }

   double sum_of_squared_block_sizes() const {
     return sum_of_squared_block_sizes(root());
   }

-  FreeChunk* find_chunk_ends_at(HeapWord* target) const;
+  Chunk* find_chunk_ends_at(HeapWord* target) const;

   // Find the list with size "size" in the binary tree and update
   // the statistics in the list according to "split" (chunk was
   // split or coalesce) and "birth" (chunk was added or removed).
-  void dictCensusUpdate(size_t size, bool split, bool birth);
+  void dict_census_udpate(size_t size, bool split, bool birth);
   // Return true if the dictionary is overpopulated (more chunks of
   // this size than desired) for size "size".
-  bool coalDictOverPopulated(size_t size);
+  bool coal_dict_over_populated(size_t size);
   // Methods called at the beginning of a sweep to prepare the
   // statistics for the sweep.
-  void beginSweepDictCensus(double coalSurplusPercent,
+  void begin_sweep_dict_census(double coalSurplusPercent,
                             float inter_sweep_current,
                             float inter_sweep_estimate,
                             float intra_sweep_estimate);
   // Methods called after the end of a sweep to modify the
   // statistics for the sweep.
-  void endSweepDictCensus(double splitSurplusPercent);
+  void end_sweep_dict_census(double splitSurplusPercent);
   // Return the largest free chunk in the tree.
-  FreeChunk* findLargestDict() const;
+  Chunk* find_largest_dict() const;
   // Accessors for statistics
-  void setTreeSurplus(double splitSurplusPercent);
-  void setTreeHints(void);
+  void set_tree_surplus(double splitSurplusPercent);
+  void set_tree_hints(void);
   // Reset statistics for all the lists in the tree.
-  void clearTreeCensus(void);
+  void clear_tree_census(void);
   // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
-  void printDictCensus(void) const;
+  void print_dict_census(void) const;
   void print_free_lists(outputStream* st) const;

-  // For debugging.  Returns the sum of the _returnedBytes for
+  // For debugging.  Returns the sum of the _returned_bytes for
   // all lists in the tree.
-  size_t sumDictReturnedBytes()     PRODUCT_RETURN0;
-  // Sets the _returnedBytes for all the lists in the tree to zero.
-  void   initializeDictReturnedBytes()      PRODUCT_RETURN;
+  size_t sum_dict_returned_bytes()     PRODUCT_RETURN0;
+  // Sets the _returned_bytes for all the lists in the tree to zero.
+  void   initialize_dict_returned_bytes()      PRODUCT_RETURN;
   // For debugging.  Return the total number of chunks in the dictionary.
-  size_t totalCount()       PRODUCT_RETURN0;
+  size_t total_count()       PRODUCT_RETURN0;

-  void       reportStatistics() const;
+  void       report_statistics() const;

   void       verify() const;
 };

-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
+#endif // SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
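get_chunk_from_tree() above performs a best-fit search over a tree keyed by chunk size. A standalone sketch of that search, assuming a plain size-ordered binary tree and omitting HotSpot's splaying, per-size lists, and node removal:

```cpp
#include <cstddef>
#include <cstdio>

struct Node {
  size_t size;
  Node*  left;
  Node*  right;
};

// Returns the best-fitting node for "request" (smallest size >= request),
// or nullptr if no node is large enough.
Node* find_at_least(Node* root, size_t request) {
  Node* best = nullptr;
  for (Node* cur = root; cur != nullptr; ) {
    if (cur->size == request) return cur;  // exact fit, done
    if (cur->size < request) {
      cur = cur->right;                    // too small: only larger sizes help
    } else {
      best = cur;                          // candidate; look for a tighter fit
      cur = cur->left;
    }
  }
  return best;
}

int main() {
  Node n8{8, nullptr, nullptr}, n32{32, nullptr, nullptr};
  Node n16{16, &n8, &n32};
  Node* hit = find_at_least(&n16, 20);
  std::printf("best fit for 20: %zu\n", hit ? hit->size : 0);  // prints 32
  return 0;
}
```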
src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp → src/share/vm/memory/freeBlockDictionary.cpp
...
@@ -23,7 +23,10 @@
  */
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#endif // SERIALGC
+#include "memory/freeBlockDictionary.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
 #endif
...
@@ -38,19 +41,19 @@
 #endif

 #ifndef PRODUCT
-Mutex* FreeBlockDictionary::par_lock() const {
+template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
   return _lock;
 }

-void FreeBlockDictionary::set_par_lock(Mutex* lock) {
+template <class Chunk> void FreeBlockDictionary<Chunk>::set_par_lock(Mutex* lock) {
   _lock = lock;
 }

-void FreeBlockDictionary::verify_par_locked() const {
+template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() const {
 #ifdef ASSERT
   if (ParallelGCThreads > 0) {
-    Thread* myThread = Thread::current();
-    if (myThread->is_GC_task_thread()) {
+    Thread* my_thread = Thread::current();
+    if (my_thread->is_GC_task_thread()) {
       assert(par_lock() != NULL, "Should be using locking?");
       assert_lock_strong(par_lock());
     }
...
@@ -58,3 +61,8 @@ void FreeBlockDictionary::verify_par_locked() const {
 #endif // ASSERT
 }
 #endif
+
+#ifndef SERIALGC
+// Explicitly instantiate for FreeChunk
+template class FreeBlockDictionary<FreeChunk>;
+#endif // SERIALGC
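The new `template class FreeBlockDictionary<FreeChunk>;` line is an explicit instantiation: with the member definitions kept in the .cpp file, the compiler emits no code until told which specialization to build, and this line forces generation of the FreeChunk variant in this translation unit so other object files can link against it. A condensed single-file sketch of the pattern (toy names, the .hpp/.cpp split collapsed for illustration):

```cpp
#include <cstdio>

// Declaration, as it would appear in the header:
template <class T>
struct Dict {
  T stored;
  T get() const;               // defined out of line, as in the .cpp split
};

// Definition, as it would appear in the .cpp:
template <class T>
T Dict<T>::get() const { return stored; }

// Explicit instantiation: emits Dict<int>::get in this translation unit.
template struct Dict<int>;

int main() {
  Dict<int> d{41};
  std::printf("%d\n", d.get() + 1);  // 42
  return 0;
}
```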
src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp → src/share/vm/memory/freeBlockDictionary.hpp
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -22,12 +22,10 @@
  *
  */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
+#ifndef SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
+#define SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP

-#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
...
@@ -35,6 +33,7 @@
 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
+template <class Chunk>
 class FreeBlockDictionary: public CHeapObj {
  public:
   enum Dither {
...
@@ -52,45 +51,45 @@ class FreeBlockDictionary: public CHeapObj {
   NOT_PRODUCT(Mutex* _lock;)
  public:
-  virtual void       removeChunk(FreeChunk* fc) = 0;
-  virtual FreeChunk* getChunk(size_t size, Dither dither = atLeast) = 0;
-  virtual void       returnChunk(FreeChunk* chunk) = 0;
-  virtual size_t     totalChunkSize(debug_only(const Mutex* lock)) const = 0;
-  virtual size_t     maxChunkSize()   const = 0;
-  virtual size_t     minSize()        const = 0;
+  virtual void       remove_chunk(Chunk* fc) = 0;
+  virtual Chunk*     get_chunk(size_t size, Dither dither = atLeast) = 0;
+  virtual void       return_chunk(Chunk* chunk) = 0;
+  virtual size_t     total_chunk_size(debug_only(const Mutex* lock)) const = 0;
+  virtual size_t     max_chunk_size()   const = 0;
+  virtual size_t     min_size()        const = 0;
   // Reset the dictionary to the initial conditions for a single
   // block.
   virtual void       reset(HeapWord* addr, size_t size) = 0;
   virtual void       reset() = 0;

-  virtual void       dictCensusUpdate(size_t size, bool split, bool birth) = 0;
-  virtual bool       coalDictOverPopulated(size_t size) = 0;
-  virtual void       beginSweepDictCensus(double coalSurplusPercent,
+  virtual void       dict_census_udpate(size_t size, bool split, bool birth) = 0;
+  virtual bool       coal_dict_over_populated(size_t size) = 0;
+  virtual void       begin_sweep_dict_census(double coalSurplusPercent,
                        float inter_sweep_current, float inter_sweep_estimate,
                        float intra__sweep_current) = 0;
-  virtual void       endSweepDictCensus(double splitSurplusPercent) = 0;
-  virtual FreeChunk* findLargestDict() const = 0;
+  virtual void       end_sweep_dict_census(double splitSurplusPercent) = 0;
+  virtual Chunk*     find_largest_dict() const = 0;
   // verify that the given chunk is in the dictionary.
-  virtual bool       verifyChunkInFreeLists(FreeChunk* tc) const = 0;
+  virtual bool       verify_chunk_in_free_list(Chunk* tc) const = 0;

   // Sigma_{all_free_blocks} (block_size^2)
   virtual double     sum_of_squared_block_sizes() const = 0;

-  virtual FreeChunk* find_chunk_ends_at(HeapWord* target) const = 0;
-  virtual void       inc_totalSize(size_t v) = 0;
-  virtual void       dec_totalSize(size_t v) = 0;
+  virtual Chunk*     find_chunk_ends_at(HeapWord* target) const = 0;
+  virtual void       inc_total_size(size_t v) = 0;
+  virtual void       dec_total_size(size_t v) = 0;

   NOT_PRODUCT (
-    virtual size_t   sumDictReturnedBytes() = 0;
-    virtual void     initializeDictReturnedBytes() = 0;
-    virtual size_t   totalCount() = 0;
+    virtual size_t   sum_dict_returned_bytes() = 0;
+    virtual void     initialize_dict_returned_bytes() = 0;
+    virtual size_t   total_count() = 0;
   )

-  virtual void       reportStatistics() const {
+  virtual void       report_statistics() const {
     gclog_or_tty->print("No statistics available");
   }

-  virtual void       printDictCensus() const = 0;
+  virtual void       print_dict_census() const = 0;
   virtual void       print_free_lists(outputStream* st) const = 0;

   virtual void       verify() const = 0;
...
@@ -100,4 +99,4 @@ class FreeBlockDictionary: public CHeapObj {
   void verify_par_locked() const PRODUCT_RETURN;
 };

-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
+#endif // SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp → src/share/vm/memory/freeList.cpp
...
@@ -23,20 +23,25 @@
  */
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/freeList.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/vmThread.hpp"

+#ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#endif // SERIALGC
+
 // Free list.  A FreeList is used to access a linked list of chunks
 // of space in the heap.  The head and tail are maintained so that
 // items can be (as in the current implementation) added at the
 // at the tail of the list and removed from the head of the list to
 // maintain a FIFO queue.

-FreeList::FreeList() :
+template <class Chunk>
+FreeList<Chunk>::FreeList() :
   _head(NULL), _tail(NULL)
 #ifdef ASSERT
   , _protecting_lock(NULL)
...
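The comment above describes the FIFO discipline: chunks are appended at the tail and removed from the head, so the oldest free chunk is reused first. A minimal doubly-linked sketch of that discipline (toy types, not HotSpot code):

```cpp
#include <cstdio>

struct Chunk {
  int    id;
  Chunk* next;
  Chunk* prev;
};

struct Fifo {
  Chunk* head = nullptr;
  Chunk* tail = nullptr;

  void add_at_tail(Chunk* c) {          // new chunks join at the back
    c->next = nullptr;
    c->prev = tail;
    if (tail != nullptr) tail->next = c; else head = c;
    tail = c;
  }

  Chunk* remove_from_head() {           // allocation takes the oldest chunk
    Chunk* c = head;
    if (c != nullptr) {
      head = c->next;
      if (head != nullptr) head->prev = nullptr; else tail = nullptr;
    }
    return c;
  }
};

int main() {
  Chunk a{1}, b{2};
  Fifo q;
  q.add_at_tail(&a);
  q.add_at_tail(&b);
  std::printf("%d\n", q.remove_from_head()->id);  // 1: oldest chunk first
  return 0;
}
```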
@@ -48,7 +53,8 @@ FreeList::FreeList() :
   init_statistics();
 }

-FreeList::FreeList(FreeChunk* fc) :
+template <class Chunk>
+FreeList<Chunk>::FreeList(Chunk* fc) :
   _head(fc), _tail(fc)
 #ifdef ASSERT
   , _protecting_lock(NULL)
...
@@ -59,48 +65,35 @@ FreeList::FreeList(FreeChunk* fc) :
   _hint = 0;
   init_statistics();
 #ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(size() * HeapWordSize);
-#endif
-}
-
-FreeList::FreeList(HeapWord* addr, size_t size) :
-  _head((FreeChunk*) addr), _tail((FreeChunk*) addr)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  assert(size > sizeof(FreeChunk), "size is too small");
-  head()->setSize(size);
-  _size = size;
-  _count = 1;
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(_size * HeapWordSize);
+  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
 #endif
 }

-void FreeList::reset(size_t hint) {
+template <class Chunk>
+void FreeList<Chunk>::reset(size_t hint) {
   set_count(0);
   set_head(NULL);
   set_tail(NULL);
   set_hint(hint);
 }

-void FreeList::init_statistics(bool split_birth) {
+template <class Chunk>
+void FreeList<Chunk>::init_statistics(bool split_birth) {
   _allocation_stats.initialize(split_birth);
 }

-FreeChunk* FreeList::getChunkAtHead() {
+template <class Chunk>
+Chunk* FreeList<Chunk>::get_chunk_at_head() {
   assert_proper_lock_protection();
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  FreeChunk* fc = head();
+  Chunk* fc = head();
   if (fc != NULL) {
-    FreeChunk* nextFC = fc->next();
+    Chunk* nextFC = fc->next();
     if (nextFC != NULL) {
       // The chunk fc being removed has a "next".  Set the "next" to the
       // "prev" of fc.
-      nextFC->linkPrev(NULL);
+      nextFC->link_prev(NULL);
     } else { // removed tail of list
       link_tail(NULL);
     }
...
@@ -113,29 +106,30 @@ FreeChunk* FreeList::getChunkAtHead() {
 }

-void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
+template <class Chunk>
+void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
   assert_proper_lock_protection();
   assert(fl->count() == 0, "Precondition");
   if (count() > 0) {
     int k = 1;
     fl->set_head(head()); n--;
-    FreeChunk* tl = head();
+    Chunk* tl = head();
     while (tl->next() != NULL && n > 0) {
       tl = tl->next(); n--; k++;
     }
     assert(tl != NULL, "Loop Inv.");

     // First, fix up the list we took from.
-    FreeChunk* new_head = tl->next();
+    Chunk* new_head = tl->next();
     set_head(new_head);
     set_count(count() - k);
     if (new_head == NULL) {
       set_tail(NULL);
     } else {
-      new_head->linkPrev(NULL);
+      new_head->link_prev(NULL);
     }
     // Now we can fix up the tail.
-    tl->linkNext(NULL);
+    tl->link_next(NULL);
     // And return the result.
     fl->set_tail(tl);
     fl->set_count(k);
...
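getFirstNChunksFromList() splices a prefix off the source list by walking at most n-1 links and making a single cut, rather than unlinking chunks one by one. A standalone sketch of the same splice (illustrative names, not HotSpot code):

```cpp
#include <cstdio>

struct C { int id; C* next; C* prev; };

// Detach up to n chunks from the front of (head, count) into
// (out_head, out_tail), reporting how many were taken in k.
void take_first_n(C*& head, int& count, int n, C*& out_head, C*& out_tail, int& k) {
  out_head = head;
  out_tail = nullptr;
  k = 0;
  C* tl = head;
  if (tl == nullptr) return;
  k = 1; n--;
  while (tl->next != nullptr && n > 0) { tl = tl->next; n--; k++; }
  head = tl->next;                  // fix up the source list with one cut
  if (head != nullptr) head->prev = nullptr;
  tl->next = nullptr;               // terminate the taken run
  out_tail = tl;
  count -= k;
}

int main() {
  C a{1}, b{2}, c{3};
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  C* head = &a; int count = 3;
  C *oh, *ot; int k;
  take_first_n(head, count, 2, oh, ot, k);
  std::printf("took %d: %d..%d, remaining head=%d\n",
              k, oh->id, ot->id, head->id);  // took 2: 1..2, remaining head=3
  return 0;
}
```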
@@ -143,7 +137,8 @@ void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
 }

 // Remove this chunk from the list
-void FreeList::removeChunk(FreeChunk*fc) {
+template <class Chunk>
+void FreeList<Chunk>::remove_chunk(Chunk*fc) {
   assert_proper_lock_protection();
   assert(head() != NULL, "Remove from empty list");
   assert(fc != NULL, "Remove a NULL chunk");
...
@@ -151,12 +146,12 @@ void FreeList::removeChunk(FreeChunk*fc) {
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");

-  FreeChunk* prevFC = fc->prev();
-  FreeChunk* nextFC = fc->next();
+  Chunk* prevFC = fc->prev();
+  Chunk* nextFC = fc->next();
   if (nextFC != NULL) {
     // The chunk fc being removed has a "next".  Set the "next" to the
     // "prev" of fc.
-    nextFC->linkPrev(prevFC);
+    nextFC->link_prev(prevFC);
   } else { // removed tail of list
     link_tail(prevFC);
   }
...
@@ -165,7 +160,7 @@ void FreeList::removeChunk(FreeChunk*fc) {
     assert(nextFC == NULL || nextFC->prev() == NULL,
       "Prev of head should be NULL");
   } else {
-    prevFC->linkNext(nextFC);
+    prevFC->link_next(nextFC);
     assert(tail() != prevFC || prevFC->next() == NULL,
       "Next of tail should be NULL");
   }
...
@@ -174,10 +169,10 @@ void FreeList::removeChunk(FreeChunk*fc) {
     "H/T/C Inconsistency");
   // clear next and prev fields of fc, debug only
   NOT_PRODUCT(
-    fc->linkPrev(NULL);
-    fc->linkNext(NULL);
+    fc->link_prev(NULL);
+    fc->link_next(NULL);
   )
-  assert(fc->isFree(), "Should still be a free chunk");
+  assert(fc->is_free(), "Should still be a free chunk");
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(head() == NULL || head()->size() == size(), "wrong item on list");
...
@@ -185,16 +180,17 @@ void FreeList::removeChunk(FreeChunk*fc) {
 }

 // Add this chunk at the head of the list.
-void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
   assert_proper_lock_protection();
   assert(chunk != NULL, "insert a NULL chunk");
   assert(size() == chunk->size(), "Wrong size");
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");

-  FreeChunk* oldHead = head();
+  Chunk* oldHead = head();
   assert(chunk != oldHead, "double insertion");
-  chunk->linkAfter(oldHead);
+  chunk->link_after(oldHead);
   link_head(chunk);
   if (oldHead == NULL) { // only chunk in list
     assert(tail() == NULL, "inconsistent FreeList");
...
@@ -203,7 +199,7 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
   increment_count(); // of # of chunks in list
   DEBUG_ONLY(
     if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
+      increment_returned_bytes_by(size()*HeapWordSize);
     }
   )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
...
@@ -212,23 +208,25 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
 }

-void FreeList::returnChunkAtHead(FreeChunk* chunk) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
   assert_proper_lock_protection();
-  returnChunkAtHead(chunk, true);
+  return_chunk_at_head(chunk, true);
 }

 // Add this chunk at the tail of the list.
-void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
   assert_proper_lock_protection();
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(chunk != NULL, "insert a NULL chunk");
   assert(size() == chunk->size(), "wrong size");

-  FreeChunk* oldTail = tail();
+  Chunk* oldTail = tail();
   assert(chunk != oldTail, "double insertion");
   if (oldTail != NULL) {
-    oldTail->linkAfter(chunk);
+    oldTail->link_after(chunk);
   } else { // only chunk in list
     assert(head() == NULL, "inconsistent FreeList");
     link_head(chunk);
...
@@ -237,7 +235,7 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
   increment_count();  // of # of chunks in list
   DEBUG_ONLY(
     if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
+      increment_returned_bytes_by(size()*HeapWordSize);
     }
   )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
...
@@ -246,11 +244,13 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
 }

-void FreeList::returnChunkAtTail(FreeChunk* chunk) {
-  returnChunkAtTail(chunk, true);
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
+  return_chunk_at_tail(chunk, true);
 }

-void FreeList::prepend(FreeList* fl) {
+template <class Chunk>
+void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
   assert_proper_lock_protection();
   if (fl->count() > 0) {
     if (count() == 0) {
...
@@ -259,11 +259,11 @@ void FreeList::prepend(FreeList* fl) {
       set_count(fl->count());
     } else { // Both are non-empty.
-      FreeChunk* fl_tail = fl->tail();
-      FreeChunk* this_head = head();
+      Chunk* fl_tail = fl->tail();
+      Chunk* this_head = head();
       assert(fl_tail->next() == NULL, "Well-formedness of fl");
-      fl_tail->linkNext(this_head);
-      this_head->linkPrev(fl_tail);
+      fl_tail->link_next(this_head);
+      this_head->link_prev(fl_tail);
       set_head(fl->head());
       set_count(count() + fl->count());
     }
...
@@ -273,13 +273,14 @@ void FreeList::prepend(FreeList* fl) {
   }
 }

-// verifyChunkInFreeLists() is used to verify that an item is in this free list.
+// verify_chunk_in_free_list() is used to verify that an item is in this free list.
 // It is used as a debugging aid.
-bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
+template <class Chunk>
+bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
   // This is an internal consistency check, not part of the check that the
   // chunk is in the free lists.
   guarantee(fc->size() == size(), "Wrong list is being searched");
-  FreeChunk* curFC = head();
+  Chunk* curFC = head();
   while (curFC) {
     // This is an internal consistency check.
     guarantee(size() == curFC->size(), "Chunk is in wrong list.");
...
@@ -292,7 +293,8 @@ bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
 }

 #ifndef PRODUCT
-void FreeList::verify_stats() const {
+template <class Chunk>
+void FreeList<Chunk>::verify_stats() const {
   // The +1 of the LH comparand is to allow some "looseness" in
   // checking: we usually call this interface when adding a block
   // and we'll subsequently update the stats; we cannot update the
...
@@ -300,24 +302,25 @@ void FreeList::verify_stats() const {
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
-  assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
-          + _allocation_stats.coalBirths() + 1)   // Total Production Stock + 1
-         >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
              + (ssize_t)count()),                // Total Current Stock + depletion
          err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
                  " violates Conservation Principle: "
-                 "prevSweep(" SIZE_FORMAT ")"
-                 " + splitBirths(" SIZE_FORMAT ")"
-                 " + coalBirths(" SIZE_FORMAT ") + 1 >= "
-                 " splitDeaths(" SIZE_FORMAT ")"
-                 " coalDeaths(" SIZE_FORMAT ")"
+                 "prev_sweep(" SIZE_FORMAT ")"
+                 " + split_births(" SIZE_FORMAT ")"
+                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
+                 " split_deaths(" SIZE_FORMAT ")"
+                 " coal_deaths(" SIZE_FORMAT ")"
                  " + count(" SSIZE_FORMAT ")",
-                 this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
-                 _allocation_stats.splitBirths(), _allocation_stats.splitDeaths(),
-                 _allocation_stats.coalDeaths(), count()));
+                 this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
+                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_deaths(), count()));
 }
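The assertion above encodes a conservation argument: chunks left by the previous sweep plus chunks born through splits and coalescing (with one unit of slack for an in-flight update) must cover the chunks that died plus those still on the list. A worked instance with made-up numbers:

```cpp
// A worked instance of the Conservation Principle checked above,
// with illustrative numbers (not taken from HotSpot):
#include <cassert>

int main() {
  long prev_sweep   = 10;  // chunks of this size left by the previous sweep
  long split_births = 4;   // created by splitting larger chunks
  long coal_births  = 2;   // created by coalescing adjacent chunks
  long split_deaths = 3;   // consumed as part of a split
  long coal_deaths  = 1;   // absorbed into a coalesced neighbor
  long count        = 9;   // chunks currently on the list

  // Production (+1 slack for the in-flight update) must cover depletion
  // plus the current stock: 10 + 4 + 2 + 1 = 17 >= 3 + 1 + 9 = 13.
  assert(prev_sweep + split_births + coal_births + 1
         >= split_deaths + coal_deaths + count);
  return 0;
}
```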
-void FreeList::assert_proper_lock_protection_work() const {
+template <class Chunk>
+void FreeList<Chunk>::assert_proper_lock_protection_work() const {
   assert(_protecting_lock != NULL, "Don't call this directly");
   assert(ParallelGCThreads > 0, "Don't call this directly");
   Thread* thr = Thread::current();
...
@@ -334,7 +337,8 @@ void FreeList::assert_proper_lock_protection_work() const {
 #endif

 // Print the "label line" for free list stats.
-void FreeList::print_labels_on(outputStream* st, const char* c) {
+template <class Chunk>
+void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
   st->print("%16s\t", c);
   st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t"
             "%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n",
...
@@ -346,7 +350,8 @@ void FreeList::print_labels_on(outputStream* st, const char* c) {
 // to the call is a non-null string, it is printed in the first column;
 // otherwise, if the argument is null (the default), then the size of the
 // (free list) block is printed in the first column.
-void FreeList::print_on(outputStream* st, const char* c) const {
+template <class Chunk>
+void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
   if (c != NULL) {
     st->print("%16s", c);
   } else {
...
@@ -355,6 +360,11 @@ void FreeList::print_on(outputStream* st, const char* c) const {
   st->print("\t"
             SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
             SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
             SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
             SSIZE_FORMAT_W(14) "\n",
-            bfrSurp(),   surplus(),     desired(),     prevSweep(),   beforeSweep(),
-            count(),     coalBirths(),  coalDeaths(),  splitBirths(), splitDeaths());
+            bfr_surp(),  surplus(),     desired(),     prev_sweep(),  before_sweep(),
+            count(),     coal_births(), coal_deaths(), split_births(), split_deaths());
 }
+
+#ifndef SERIALGC
+// Needs to be after the definitions have been seen.
+template class FreeList<FreeChunk>;
+#endif // SERIALGC
src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp → src/share/vm/memory/freeList.hpp
...
@@ -22,39 +22,36 @@
  *
  */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
+#ifndef SHARE_VM_MEMORY_FREELIST_HPP
+#define SHARE_VM_MEMORY_FREELIST_HPP

 #include "gc_implementation/shared/allocationStats.hpp"

 class CompactibleFreeListSpace;

-// A class for maintaining a free list of FreeChunk's.  The FreeList
+// A class for maintaining a free list of Chunk's.  The FreeList
 // maintains a the structure of the list (head, tail, etc.) plus
 // statistics for allocations from the list.  The links between items
 // are not part of FreeList.  The statistics are
-// used to make decisions about coalescing FreeChunk's when they
+// used to make decisions about coalescing Chunk's when they
 // are swept during collection.
 //
 // See the corresponding .cpp file for a description of the specifics
 // for that implementation.

 class Mutex;
-class TreeList;
+template <class Chunk> class TreeList;
+template <class Chunk> class PrintTreeCensusClosure;

+template <class Chunk>
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
   friend class VMStructs;
-  friend class PrintTreeCensusClosure;
- protected:
-  TreeList* _parent;
-  TreeList* _left;
-  TreeList* _right;
+  friend class PrintTreeCensusClosure<Chunk>;

  private:
-  FreeChunk* _head;          // Head of list of free chunks
-  FreeChunk* _tail;          // Tail of list of free chunks
+  Chunk*     _head;          // Head of list of free chunks
+  Chunk*     _tail;          // Tail of list of free chunks
   size_t     _size;          // Size in Heap words of each chunk
   ssize_t    _count;         // Number of entries in list
   size_t     _hint;          // next larger size list with a positive surplus
...
@@ -92,10 +89,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
   // Construct a list without any entries.
   FreeList();
   // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(FreeChunk* fc);
-  // Construct a list which will have a FreeChunk at address "addr" and
-  // of size "size" as the first (and lone) entry in the list.
-  FreeList(HeapWord* addr, size_t size);
+  FreeList(Chunk* fc);

   // Reset the head, tail, hint, and count of a free list.
   void reset(size_t hint);
...
@@ -108,43 +102,43 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
 #endif

   // Accessors.
-  FreeChunk* head() const {
+  Chunk* head() const {
     assert_proper_lock_protection();
     return _head;
   }
-  void set_head(FreeChunk* v) {
+  void set_head(Chunk* v) {
     assert_proper_lock_protection();
     _head = v;
     assert(!_head || _head->size() == _size, "bad chunk size");
   }
   // Set the head of the list and set the prev field of non-null
   // values to NULL.
-  void link_head(FreeChunk* v) {
+  void link_head(Chunk* v) {
     assert_proper_lock_protection();
     set_head(v);
     // If this method is not used (just set the head instead),
     // this check can be avoided.
     if (v != NULL) {
-      v->linkPrev(NULL);
+      v->link_prev(NULL);
     }
   }

-  FreeChunk* tail() const {
+  Chunk* tail() const {
     assert_proper_lock_protection();
     return _tail;
   }
-  void set_tail(FreeChunk* v) {
+  void set_tail(Chunk* v) {
     assert_proper_lock_protection();
     _tail = v;
     assert(!_tail || _tail->size() == _size, "bad chunk size");
   }
   // Set the tail of the list and set the next field of non-null
   // values to NULL.
-  void link_tail(FreeChunk* v) {
+  void link_tail(Chunk* v) {
     assert_proper_lock_protection();
     set_tail(v);
     if (v != NULL) {
-      v->clearNext();
+      v->clear_next();
     }
   }
...
@@ -191,12 +185,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
                                  inter_sweep_estimate,
                                  intra_sweep_estimate);
   }

-  ssize_t coalDesired() const {
-    return _allocation_stats.coalDesired();
+  ssize_t coal_desired() const {
+    return _allocation_stats.coal_desired();
   }
-  void set_coalDesired(ssize_t v) {
+  void set_coal_desired(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalDesired(v);
+    _allocation_stats.set_coal_desired(v);
   }

   ssize_t surplus() const {
...
@@ -215,114 +209,114 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
     _allocation_stats.decrement_surplus();
   }

-  ssize_t bfrSurp() const {
-    return _allocation_stats.bfrSurp();
+  ssize_t bfr_surp() const {
+    return _allocation_stats.bfr_surp();
   }
-  void set_bfrSurp(ssize_t v) {
+  void set_bfr_surp(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_bfrSurp(v);
+    _allocation_stats.set_bfr_surp(v);
   }
-  ssize_t prevSweep() const {
-    return _allocation_stats.prevSweep();
+  ssize_t prev_sweep() const {
+    return _allocation_stats.prev_sweep();
   }
-  void set_prevSweep(ssize_t v) {
+  void set_prev_sweep(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_prevSweep(v);
+    _allocation_stats.set_prev_sweep(v);
   }
-  ssize_t beforeSweep() const {
-    return _allocation_stats.beforeSweep();
+  ssize_t before_sweep() const {
+    return _allocation_stats.before_sweep();
   }
-  void set_beforeSweep(ssize_t v) {
+  void set_before_sweep(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_beforeSweep(v);
+    _allocation_stats.set_before_sweep(v);
   }

-  ssize_t coalBirths() const {
-    return _allocation_stats.coalBirths();
+  ssize_t coal_births() const {
+    return _allocation_stats.coal_births();
   }
-  void set_coalBirths(ssize_t v) {
+  void set_coal_births(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalBirths(v);
+    _allocation_stats.set_coal_births(v);
   }
-  void increment_coalBirths() {
+  void increment_coal_births() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_coalBirths();
+    _allocation_stats.increment_coal_births();
   }

-  ssize_t coalDeaths() const {
-    return _allocation_stats.coalDeaths();
+  ssize_t coal_deaths() const {
+    return _allocation_stats.coal_deaths();
   }
-  void set_coalDeaths(ssize_t v) {
+  void set_coal_deaths(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalDeaths(v);
+    _allocation_stats.set_coal_deaths(v);
   }
-  void increment_coalDeaths() {
+  void increment_coal_deaths() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_coalDeaths();
+    _allocation_stats.increment_coal_deaths();
   }

-  ssize_t splitBirths() const {
-    return _allocation_stats.splitBirths();
+  ssize_t split_births() const {
+    return _allocation_stats.split_births();
   }
-  void set_splitBirths(ssize_t v) {
+  void set_split_births(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_splitBirths(v);
+    _allocation_stats.set_split_births(v);
   }
-  void increment_splitBirths() {
+  void increment_split_births() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_splitBirths();
+    _allocation_stats.increment_split_births();
   }

-  ssize_t splitDeaths() const {
-    return _allocation_stats.splitDeaths();
+  ssize_t split_deaths() const {
+    return _allocation_stats.split_deaths();
   }
-  void set_splitDeaths(ssize_t v) {
+  void set_split_deaths(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_splitDeaths(v);
+    _allocation_stats.set_split_deaths(v);
   }
-  void increment_splitDeaths() {
+  void increment_split_deaths() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_splitDeaths();
+    _allocation_stats.increment_split_deaths();
   }

   NOT_PRODUCT(
-    // For debugging.  The "_returnedBytes" in all the lists are summed
+    // For debugging.  The "_returned_bytes" in all the lists are summed
     // and compared with the total number of bytes swept during a
     // collection.
-    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
-    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
-    void increment_returnedBytes_by(size_t v) {
-      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
+    size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
+    void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
+    void increment_returned_bytes_by(size_t v) {
+      _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
     }
   )

   // Unlink head of list and return it.  Returns NULL if
   // the list is empty.
-  FreeChunk* getChunkAtHead();
+  Chunk* get_chunk_at_head();

   // Remove the first "n" or "count", whichever is smaller, chunks from the
   // list, setting "fl", which is required to be empty, to point to them.
-  void getFirstNChunksFromList(size_t n, FreeList* fl);
+  void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);

   // Unlink this chunk from it's free list
-  void removeChunk(FreeChunk* fc);
+  void remove_chunk(Chunk* fc);

   // Add this chunk to this free list.
-  void returnChunkAtHead(FreeChunk* fc);
-  void returnChunkAtTail(FreeChunk* fc);
+  void return_chunk_at_head(Chunk* fc);
+  void return_chunk_at_tail(Chunk* fc);

   // Similar to returnChunk* but also records some diagnostic
   // information.
-  void returnChunkAtHead(FreeChunk* fc, bool record_return);
-  void returnChunkAtTail(FreeChunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk* fc, bool record_return);
+  void return_chunk_at_tail(Chunk* fc, bool record_return);

   // Prepend "fl" (whose size is required to be the same as that of "this")
   // to the front of "this" list.
-  void prepend(FreeList* fl);
+  void prepend(FreeList<Chunk>* fl);

   // Verify that the chunk is in the list.
   // found.  Return NULL if "fc" is not found.
-  bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  bool verify_chunk_in_free_list(Chunk* fc) const;

   // Stats verification
   void verify_stats() const PRODUCT_RETURN;
...
@@ -332,4 +326,4 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
   void print_on(outputStream* st, const char* c = NULL) const;
 };

-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
+#endif // SHARE_VM_MEMORY_FREELIST_HPP
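With the class now templatized, FreeList<Chunk> places only an implicit interface on its parameter: any type that supplies next()/prev()/size() and the link_*/clear_next helpers will instantiate, which is what allows the same list code to later manage metadata chunks. A hypothetical minimal chunk type (not HotSpot's FreeChunk) showing the required surface:

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical minimal chunk type satisfying the interface FreeList<Chunk>
// expects of its template parameter (the names mirror the diff; the type
// itself is ours).
struct MiniChunk {
  MiniChunk* _next = nullptr;
  MiniChunk* _prev = nullptr;
  size_t     _size = 0;

  MiniChunk* next() const { return _next; }
  MiniChunk* prev() const { return _prev; }
  size_t     size() const { return _size; }
  void link_next(MiniChunk* c) { _next = c; }
  void link_prev(MiniChunk* c) { _prev = c; }
  void link_after(MiniChunk* c) { link_next(c); if (c != nullptr) c->link_prev(this); }
  void clear_next() { _next = nullptr; }
};

int main() {
  MiniChunk a, b;
  a._size = b._size = 32;
  a.link_after(&b);   // the same linking FreeList<Chunk> performs internally
  std::printf("a->next size: %zu\n", a.next()->size());
  return 0;
}
```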
src/share/vm/memory/generationSpec.cpp
...
@@ -68,7 +68,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
       ConcurrentMarkSweepGeneration* g = NULL;
       g = new ConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);

       g->initialize_performance_counters();
...
@@ -88,7 +88,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
       ASConcurrentMarkSweepGeneration* g = NULL;
       g = new ASConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);

       g->initialize_performance_counters();
...
@@ -175,7 +175,7 @@ PermGen* PermanentGenerationSpec::init(ReservedSpace rs,
       }
       // XXXPERM
       return new CMSPermGen(perm_rs, init_size, ctrs,
-                 (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
     }
 #endif // SERIALGC
     default:
...
src/share/vm/precompiled/precompiled.hpp
...
@@ -293,13 +293,10 @@
 # include "c1/c1_globals.hpp"
 #endif // COMPILER1
 #ifndef SERIALGC
-# include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
 # include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 # include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 # include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 # include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeList.hpp"
 # include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 # include "gc_implementation/g1/dirtyCardQueue.hpp"
 # include "gc_implementation/g1/g1BlockOffsetTable.hpp"
...
src/share/vm/runtime/vmStructs.cpp
...
@@ -44,7 +44,6 @@
 #include "code/vmreg.hpp"
 #include "compiler/oopMap.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 #include "gc_implementation/shared/immutableSpace.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
...
@@ -55,6 +54,7 @@
 #include "memory/cardTableRS.hpp"
 #include "memory/compactPermGen.hpp"
 #include "memory/defNewGeneration.hpp"
+#include "memory/freeBlockDictionary.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/generation.hpp"
 #include "memory/generationSpec.hpp"
...