openanolis / dragonwell8_hotspot
Commit 68cee6b2

Authored on Feb 17, 2012 by stefank

Merge; parents: 9a875b6e, 5f3747f4

Showing 28 changed files with 831 additions and 669 deletions (+831 -669)
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  +5 -1
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp  +143 -121
src/share/vm/gc_implementation/g1/collectionSetChooser.hpp  +88 -37
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  +66 -54
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  +7 -3
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  +161 -185
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp  +9 -8
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp  +13 -12
src/share/vm/gc_implementation/g1/g1_globals.hpp  +18 -2
src/share/vm/gc_implementation/g1/heapRegion.cpp  +3 -4
src/share/vm/gc_implementation/g1/heapRegion.hpp  +10 -2
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp  +6 -2
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp  +51 -29
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp  +7 -1
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp  +7 -1
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp  +4 -2
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp  +2 -2
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp  +4 -2
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp  +2 -2
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp  +1 -162
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp  +2 -2
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp  +167 -3
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp  +21 -13
src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp  +6 -6
src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp  +13 -8
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp  +3 -2
src/share/vm/memory/defNewGeneration.cpp  +7 -2
src/share/vm/memory/genMarkSweep.cpp  +5 -1
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

@@ -6092,7 +6092,11 @@ void CMSCollector::sweep(bool asynch) {
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();

-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-deccreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);

   // NOTE on abstract state transitions:
   // Mutators allocate-live and/or mark the mod-union table dirty
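The same idiom recurs in several collectors touched by this merge: take the timestamp from a monotonic nanosecond clock and scale it to milliseconds, rather than using wall-clock time, which can jump backwards and trigger time-warp warnings. Outside HotSpot the pattern can be sketched with the standard steady clock (a minimal illustration only, not code from this commit):

#include <chrono>
#include <cstdint>
#include <iostream>

// Monotonic "time of last GC" stand-in: steady_clock never goes backwards,
// unlike system_clock (the analogue of os::javaTimeMillis()).
static int64_t monotonic_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(
      steady_clock::now().time_since_epoch()).count();
}

int main() {
  int64_t last_gc_ms = monotonic_millis();
  // ... later, a second reading is guaranteed to be >= the first ...
  std::cout << (monotonic_millis() - last_gc_ms) << " ms since last GC\n";
  return 0;
}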
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -48,6 +48,8 @@ void CSetChooserCache::clear() {
 #ifndef PRODUCT
 bool CSetChooserCache::verify() {
+  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
+
   int index = _first;
   HeapRegion* prev = NULL;
   for (int i = 0; i < _occupancy; ++i) {

@@ -75,6 +77,8 @@ bool CSetChooserCache::verify() {
 #endif // PRODUCT

 void CSetChooserCache::insert(HeapRegion *hr) {
+  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
+
   assert(!is_full(), "cache should not be empty");
   hr->calc_gc_efficiency();

@@ -104,6 +108,9 @@ void CSetChooserCache::insert(HeapRegion *hr) {
 }

 HeapRegion *CSetChooserCache::remove_first() {
+  guarantee(false, "CSetChooserCache::remove_first(): "
+                   "don't call this any more");
+
   if (_occupancy > 0) {
     assert(_cache[_first] != NULL, "cache should have at least one region");
     HeapRegion *ret = _cache[_first];

@@ -118,16 +125,35 @@ HeapRegion *CSetChooserCache::remove_first() {
   }
 }

-static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+// Even though we don't use the GC efficiency in our heuristics as
+// much as we used to, we still order according to GC efficiency. This
+// will cause regions with a lot of live objects and large RSets to
+// end up at the end of the array. Given that we might skip collecting
+// the last few old regions, if after a few mixed GCs the remaining
+// have reclaimable bytes under a certain threshold, the hope is that
+// the ones we'll skip are ones with both large RSets and a lot of
+// live objects, not the ones with just a lot of live objects if we
+// ordered according to the amount of reclaimable bytes per region.
+static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
-    if (hr2 == NULL) return 0;
-    else return 1;
+    if (hr2 == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
   } else if (hr2 == NULL) {
     return -1;
   }
-  if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-  else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-  else return 0;
+
+  double gc_eff1 = hr1->gc_efficiency();
+  double gc_eff2 = hr2->gc_efficiency();
+  if (gc_eff1 > gc_eff2) {
+    return -1;
+  } if (gc_eff1 < gc_eff2) {
+    return 1;
+  } else {
+    return 0;
+  }
 }

 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {

@@ -151,51 +177,61 @@ CollectionSetChooser::CollectionSetChooser() :
   //
   _markedRegions((ResourceObj::set_allocation_type((address) &_markedRegions,
                                                    ResourceObj::C_HEAP),
-                  100),
-                 true),
-  _curMarkedIndex(0),
-  _numMarkedRegions(0),
-  _unmarked_age_1_returned_as_new(false),
-  _first_par_unreserved_idx(0)
-{}
+                  100), true /* C_Heap */),
+    _curr_index(0), _length(0),
+    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+    _first_par_unreserved_idx(0) {
+  _regionLiveThresholdBytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+}

 #ifndef PRODUCT
 bool CollectionSetChooser::verify() {
+  guarantee(_length >= 0, err_msg("_length: %d", _length));
+  guarantee(0 <= _curr_index && _curr_index <= _length,
+            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
   int index = 0;
-  guarantee(_curMarkedIndex <= _numMarkedRegions,
-            "_curMarkedIndex should be within bounds");
-  while (index < _curMarkedIndex) {
-    guarantee(_markedRegions.at(index++) == NULL,
-              "all entries before _curMarkedIndex should be NULL");
+  size_t sum_of_reclaimable_bytes = 0;
+  while (index < _curr_index) {
+    guarantee(_markedRegions.at(index) == NULL,
+              "all entries before _curr_index should be NULL");
+    index += 1;
   }
   HeapRegion *prev = NULL;
-  while (index < _numMarkedRegions) {
+  while (index < _length) {
     HeapRegion *curr = _markedRegions.at(index++);
     guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
     int si = curr->sort_index();
     guarantee(!curr->is_young(), "should not be young!");
     guarantee(!curr->isHumongous(), "should not be humongous!");
     guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
+      guarantee(orderRegions(prev, curr) != 1,
+                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
+                        prev->gc_efficiency(), curr->gc_efficiency()));
     }
+    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  return _cache.verify();
+  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+            err_msg("reclaimable bytes inconsistent, "
+                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+  return true;
 }
 #endif

-void CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+void CollectionSetChooser::fillCache() {
+  guarantee(false, "fillCache: don't call this any more");
+
+  while (!_cache.is_full() && (_curr_index < _length)) {
+    HeapRegion* hr = _markedRegions.at(_curr_index);
     assert(hr != NULL,
            err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curMarkedIndex));
-    _curMarkedIndex += 1;
+                   _curr_index));
+    _curr_index += 1;
     assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
     _markedRegions.at_put(hr->sort_index(), NULL);
     _cache.insert(hr);
     assert(!_cache.is_empty(), "cache should not be empty");

@@ -203,9 +239,7 @@ CollectionSetChooser::fillCache() {
   assert(verify(), "cache should be consistent");
 }

-void CollectionSetChooser::sortMarkedHeapRegions() {
-  guarantee(_cache.is_empty(), "cache should be empty");
-
+void CollectionSetChooser::sortMarkedHeapRegions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
     if (G1PrintParCleanupStats) {
@@ -217,43 +251,78 @@ CollectionSetChooser::sortMarkedHeapRegions() {
     _markedRegions.trunc_to(_first_par_unreserved_idx);
   }
   _markedRegions.sort(orderRegions);
-  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-  assert(_numMarkedRegions == 0 ||
-         _markedRegions.at(_numMarkedRegions - 1) != NULL,
-         "Testing _numMarkedRegions");
-  assert(_numMarkedRegions == _markedRegions.length() ||
-         _markedRegions.at(_numMarkedRegions) == NULL,
-         "Testing _numMarkedRegions");
+  assert(_length <= _markedRegions.length(), "Requirement");
+  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+         "Testing _length");
+  assert(_length == _markedRegions.length() ||
+         _markedRegions.at(_length) == NULL, "Testing _length");
   if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("     Sorted %d marked regions.", _numMarkedRegions);
+    gclog_or_tty->print_cr("     Sorted %d marked regions.", _length);
   }
-  for (int i = 0; i < _numMarkedRegions; i++) {
+  for (int i = 0; i < _length; i++) {
     assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
     _markedRegions.at(i)->set_sort_index(i);
   }
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _numMarkedRegions; ++i) {
+    for (int i = 0; i < _length; ++i) {
       HeapRegion* r = _markedRegions.at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "should now be sorted");
+  assert(verify(), "CSet chooser verification");
 }

+size_t CollectionSetChooser::calcMinOldCSetLength() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _length;
+  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return result;
+}
+
+size_t CollectionSetChooser::calcMaxOldCSetLength() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return result;
+}
+
-void
-CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.append(hr);
-  _numMarkedRegions++;
+  _length++;
+  _remainingReclaimableBytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }

-void
-CollectionSetChooser::
-prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                             size_t chunkSize) {
   _first_par_unreserved_idx = 0;
   int n_threads = ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
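Both calcMinOldCSetLength() and calcMaxOldCSetLength() round an integer division up ("emulate ceiling") using only integer arithmetic. The idiom, reduced to a standalone sketch (illustrative only, not code from this commit):

#include <cassert>
#include <cstddef>

// Ceiling of a/b with integer arithmetic, the same trick as the
// "emulate ceiling" blocks above: round down, then bump by one if the
// truncated quotient lost a remainder.
static size_t ceil_div(size_t a, size_t b) {
  size_t result = a / b;
  if (result * b < a) {
    result += 1;
  }
  return result;
}

int main() {
  // e.g. 250 candidate old regions spread over at most 4 mixed GCs
  // means at least 63 regions per GC.
  assert(ceil_div(250, 4) == 63);
  assert(ceil_div(252, 4) == 63);
  return 0;
}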
@@ -274,8 +343,7 @@ prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
   _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
 }

-jint
-CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   // Don't do this assert because this can be called at a point
   // where the loop up stream will not execute again but might
   // try to claim more chunks (loop test has not been done yet).

@@ -287,83 +355,37 @@ CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   return res - n_regions;
 }

-void
-CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
   assert(_markedRegions.at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.at_put(index, hr);
   hr->calc_gc_efficiency();
 }

-void
-CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-  (void) Atomic::add(inc_by, &_numMarkedRegions);
+void CollectionSetChooser::updateTotals(jint region_num,
+                                        size_t reclaimable_bytes) {
+  // Only take the lock if we actually need to update the totals.
+  if (region_num > 0) {
+    assert(reclaimable_bytes > 0, "invariant");
+    // We could have just used atomics instead of taking the
+    // lock. However, we currently don't have an atomic add for size_t.
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    _length += (int) region_num;
+    _remainingReclaimableBytes += reclaimable_bytes;
+  } else {
+    assert(reclaimable_bytes == 0, "invariant");
+  }
 }

-void CollectionSetChooser::clearMarkedHeapRegions(){
+void CollectionSetChooser::clearMarkedHeapRegions() {
   for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r = _markedRegions.at(i);
-    if (r != NULL) r->set_sort_index(-1);
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      r->set_sort_index(-1);
+    }
   }
   _markedRegions.clear();
-  _curMarkedIndex = 0;
-  _numMarkedRegions = 0;
-  _cache.clear();
+  _curr_index = 0;
+  _length = 0;
+  _remainingReclaimableBytes = 0;
 };

 void CollectionSetChooser::updateAfterFullCollection() {
   clearMarkedHeapRegions();
 }
-
-// if time_remaining < 0.0, then this method should try to return
-// a region, whether it fits within the remaining time or not
-HeapRegion*
-CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                          double avg_prediction) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  fillCache();
-  if (_cache.is_empty()) {
-    assert(_curMarkedIndex == _numMarkedRegions,
-           "if cache is empty, list should also be empty");
-    ergo_verbose0(ErgoCSetConstruction,
-                  "stop adding old regions to CSet",
-                  ergo_format_reason("cache is empty"));
-    return NULL;
-  }
-
-  HeapRegion *hr = _cache.get_first();
-  assert(hr != NULL, "if cache not empty, first entry should be non-null");
-  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-
-  if (g1p->adaptive_young_list_length()) {
-    if (time_remaining - predicted_time < 0.0) {
-      g1h->check_if_region_is_too_expensive(predicted_time);
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than remaining time")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("remaining time"),
-                    predicted_time, time_remaining);
-      return NULL;
-    }
-  } else {
-    double threshold = 2.0 * avg_prediction;
-    if (predicted_time > threshold) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than threshold")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("threshold"),
-                    predicted_time, threshold);
-      return NULL;
-    }
-  }
-
-  HeapRegion *hr2 = _cache.remove_first();
-  assert(hr == hr2, "cache contents should not have changed");
-
-  return hr;
-}
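The new comment in updateTotals() notes that an atomic add for size_t was not available at the time, hence the ParGCRareEvent_lock. Purely as a point of comparison (not part of this commit, and not HotSpot's internal API), the lock-free shape of the same update in modern standard C++ would look like:

#include <atomic>
#include <cstddef>

// Lock-free equivalent of updateTotals(): both counters are bumped with
// relaxed atomic adds, assuming the values are only read at a safepoint.
struct Totals {
  std::atomic<int>         length{0};
  std::atomic<std::size_t> remaining_reclaimable_bytes{0};

  void update(int region_num, std::size_t reclaimable_bytes) {
    if (region_num > 0) {
      length.fetch_add(region_num, std::memory_order_relaxed);
      remaining_reclaimable_bytes.fetch_add(reclaimable_bytes,
                                            std::memory_order_relaxed);
    }
  }
};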
src/share/vm/gc_implementation/g1/collectionSetChooser.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -28,28 +28,6 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"

-// We need to sort heap regions by collection desirability.
-// This sorting is currently done in two "stages". An initial sort is
-// done following a cleanup pause as soon as all of the marked but
-// non-empty regions have been identified and the completely empty
-// ones reclaimed.
-// This gives us a global sort on a GC efficiency metric
-// based on predictive data available at that time. However,
-// any of these regions that are collected will only be collected
-// during a future GC pause, by which time it is possible that newer
-// data might allow us to revise and/or refine the earlier
-// pause predictions, leading to changes in expected gc efficiency
-// order. To somewhat mitigate this obsolescence, more so in the
-// case of regions towards the end of the list, which will be
-// picked later, these pre-sorted regions from the _markedRegions
-// array are not used as is, but a small prefix thereof is
-// insertion-sorted again into a small cache, based on more
-// recent remembered set information. Regions are then drawn
-// from this cache to construct the collection set at each
-// incremental GC.
-// This scheme and/or its implementation may be subject to
-// revision in the future.
-
 class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
   enum {

@@ -103,24 +81,82 @@ public:
 class CollectionSetChooser: public CHeapObj {

   GrowableArray<HeapRegion*> _markedRegions;
-  int _curMarkedIndex;
-  int _numMarkedRegions;
-  CSetChooserCache _cache;

-  // True iff last collection pause ran of out new "age 0" regions, and
-  // returned an "age 1" region.
-  bool _unmarked_age_1_returned_as_new;
+  // The index of the next candidate old region to be considered for
+  // addition to the CSet.
+  int _curr_index;
+
+  // The number of candidate old regions added to the CSet chooser.
+  int _length;
+
+  CSetChooserCache _cache;

   jint _first_par_unreserved_idx;

+  // If a region has more live bytes than this threshold, it will not
+  // be added to the CSet chooser and will not be a candidate for
+  // collection.
+  size_t _regionLiveThresholdBytes;
+
+  // The sum of reclaimable bytes over all the regions in the CSet chooser.
+  size_t _remainingReclaimableBytes;
+
 public:

-  HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction);
+  // Return the current candidate region to be considered for
+  // collection without removing it from the CSet chooser.
+  HeapRegion* peek() {
+    HeapRegion* res = NULL;
+    if (_curr_index < _length) {
+      res = _markedRegions.at(_curr_index);
+      assert(res != NULL,
+             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                     _curr_index));
+    }
+    return res;
+  }
+
+  // Remove the given region from the CSet chooser and move to the
+  // next one. The given region should be the current candidate region
+  // in the CSet chooser.
+  void remove_and_move_to_next(HeapRegion* hr) {
+    assert(hr != NULL, "pre-condition");
+    assert(_curr_index < _length, "pre-condition");
+    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
+    hr->set_sort_index(-1);
+    _markedRegions.at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+           err_msg("remaining reclaimable bytes inconsistent "
+                   "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
+                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
+    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+    _curr_index += 1;
+  }

   CollectionSetChooser();

   void sortMarkedHeapRegions();
   void fillCache();
+
+  // Determine whether to add the given region to the CSet chooser or
+  // not. Currently, we skip humongous regions (we never add them to
+  // the CSet, we only reclaim them during cleanup) and regions whose
+  // live bytes are over the threshold.
+  bool shouldAdd(HeapRegion* hr) {
+    assert(hr->is_marked(), "pre-condition");
+    assert(!hr->is_young(), "should never consider young regions");
+    return !hr->isHumongous() &&
+            hr->live_bytes() < _regionLiveThresholdBytes;
+  }
+
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMinOldCSetLength();
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMaxOldCSetLength();
+
+  // Serial version.
   void addMarkedHeapRegion(HeapRegion* hr);

   // Must be called before calls to getParMarkedHeapRegionChunk.

@@ -133,14 +169,21 @@ public:
   // Set the marked array entry at index to hr. Careful to claim the index
   // first if in parallel.
   void setMarkedHeapRegion(jint index, HeapRegion* hr);

-  // Atomically increment the number of claimed regions by "inc_by".
-  void incNumMarkedHeapRegions(jint inc_by);
+  // Atomically increment the number of added regions by region_num
+  // and the amount of reclaimable bytes by reclaimable_bytes.
+  void updateTotals(jint region_num, size_t reclaimable_bytes);

   void clearMarkedHeapRegions();

   void updateAfterFullCollection();

+  // Return the number of candidate regions that remain to be collected.
+  size_t remainingRegions() { return _length - _curr_index; }
+
+  // Determine whether the CSet chooser has more candidate regions or not.
+  bool isEmpty() { return remainingRegions() == 0; }
+
-  bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
+  // Return the reclaimable bytes that remain to be collected on
+  // all the candidate regions in the CSet chooser.
+  size_t remainingReclaimableBytes() { return _remainingReclaimableBytes; }

   // Returns true if the used portion of "_markedRegions" is properly
   // sorted, otherwise asserts false.

@@ -148,9 +191,17 @@ public:
   bool verify(void);
   bool regionProperlyOrdered(HeapRegion* r) {
     int si = r->sort_index();
-    return (si == -1) ||
-      (si > -1 && _markedRegions.at(si) == r) ||
-      (si < -1 && _cache.region_in_cache(r));
+    if (si > -1) {
+      guarantee(_curr_index <= si && si < _length,
+                err_msg("curr: %d sort index: %d: length: %d",
+                        _curr_index, si, _length));
+      guarantee(_markedRegions.at(si) == r,
+                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
+                        si, _markedRegions.at(si), r));
+    } else {
+      guarantee(si == -1, err_msg("sort index: %d", si));
+    }
+    return true;
   }

 #endif
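The header replaces getNextMarkedRegion() with a plain cursor over the pre-sorted candidate array: peek() at the current candidate, decide whether it still fits, then remove_and_move_to_next(). A standalone sketch of that consumption pattern (illustrative only; the types and the pause-budget test are invented for the example, not taken from the commit):

#include <vector>

struct Region { double predicted_time_ms; };

// A toy chooser exposing the same peek / remove_and_move_to_next cursor.
struct Chooser {
  std::vector<Region*> candidates;   // already sorted, most efficient first
  int curr = 0;

  Region* peek() {
    return curr < (int) candidates.size() ? candidates[curr] : nullptr;
  }
  void remove_and_move_to_next() { ++curr; }
};

// Consume candidates until the pause-time budget runs out.
static int build_cset(Chooser& chooser, double budget_ms) {
  int taken = 0;
  while (Region* r = chooser.peek()) {
    if (r->predicted_time_ms > budget_ms) {
      break;                          // current candidate no longer fits
    }
    budget_ms -= r->predicted_time_ms;
    chooser.remove_and_move_to_next();
    ++taken;
  }
  return taken;
}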
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -958,7 +958,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
       should_try_gc = false;
     } else {
       // Read the GC count while still holding the Heap_lock.
-      gc_count_before = SharedHeap::heap()->total_collections();
+      gc_count_before = total_collections();
       should_try_gc = true;
     }
   }

@@ -976,7 +976,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {

@@ -1031,7 +1031,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   // the check before we do the actual allocation. The reason for doing it
   // before the allocation is that we avoid having to keep track of the newly
   // allocated memory while we do a GC.
-  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
     collect(GCCause::_g1_humongous_allocation);
   }

@@ -1059,7 +1060,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
       should_try_gc = false;
     } else {
       // Read the GC count while still holding the Heap_lock.
-      gc_count_before = SharedHeap::heap()->total_collections();
+      gc_count_before = total_collections();
       should_try_gc = true;
     }
   }

@@ -1081,7 +1082,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {

@@ -2311,10 +2312,12 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
 }

 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-      cause == GCCause::_g1_humongous_allocation);
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }

 #ifndef PRODUCT

@@ -2408,47 +2411,66 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
 }

 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+  assert_heap_not_locked();

   unsigned int gc_count_before;
   unsigned int full_gc_count_before;
-  {
-    MutexLocker ml(Heap_lock);
-
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-  }
-
-  if (should_do_concurrent_full_gc(cause)) {
-    // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle. We're setting word_size to 0 which means that
-    // we are not requesting a post-GC allocation.
-    VM_G1IncCollectionPause op(gc_count_before,
-                               0,     /* word_size */
-                               true,  /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               cause);
-    VMThread::execute(&op);
-  } else {
-    if (cause == GCCause::_gc_locker
-        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-      // Schedule a standard evacuation pause. We're setting word_size
-      // to 0 which means that we are not requesting a post-GC allocation.
-      VM_G1IncCollectionPause op(gc_count_before,
-                                 0,     /* word_size */
-                                 false, /* should_initiate_conc_mark */
-                                 g1_policy()->max_pause_time_ms(),
-                                 cause);
-      VMThread::execute(&op);
-    } else {
-      // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-      VMThread::execute(&op);
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
+    {
+      MutexLocker ml(Heap_lock);
+
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
+    }
+
+    if (should_do_concurrent_full_gc(cause)) {
+      // Schedule an initial-mark evacuation pause that will start a
+      // concurrent cycle. We're setting word_size to 0 which means that
+      // we are not requesting a post-GC allocation.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 0,     /* word_size */
+                                 true,  /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
+      VMThread::execute(&op);
+      if (!op.pause_succeeded()) {
+        // Another GC got scheduled and prevented us from scheduling
+        // the initial-mark GC. It's unlikely that the GC that
+        // pre-empted us was also an initial-mark GC. So, we'll retry
+        // the initial-mark GC.
+        if (full_gc_count_before == total_full_collections()) {
+          retry_gc = true;
+        } else {
+          // A Full GC happened while we were trying to schedule the
+          // initial-mark GC. No point in starting a new cycle given
+          // that the whole heap was collected anyway.
+        }
+      }
+    } else {
+      if (cause == GCCause::_gc_locker
+          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+        // Schedule a standard evacuation pause. We're setting word_size
+        // to 0 which means that we are not requesting a post-GC allocation.
+        VM_G1IncCollectionPause op(gc_count_before,
+                                   0,     /* word_size */
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   cause);
+        VMThread::execute(&op);
+      } else {
+        // Schedule a Full GC.
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VMThread::execute(&op);
+      }
     }
-  }
+  } while (retry_gc);
 }

 bool G1CollectedHeap::is_in(const void* p) const {

@@ -3149,12 +3171,12 @@ void G1CollectedHeap::verify(bool allow_dirty,
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;

     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
-                         SharedHeap::ScanningOption(so),  // roots scanning options
+                         ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);

@@ -3425,16 +3447,6 @@ G1CollectedHeap::doConcurrentMark() {
   }
 }

-double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
-                                                       bool young) {
-  return _g1_policy->predict_region_elapsed_time_ms(hr, young);
-}
-
-void G1CollectedHeap::check_if_region_is_too_expensive(double predicted_time_ms) {
-  _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
-}
-
 size_t G1CollectedHeap::pending_card_num() {
   size_t extra_cards = 0;
   JavaThread *curr = Threads::first();

@@ -3706,12 +3718,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE

-        g1_policy()->choose_collection_set(target_pause_time_ms);
+        g1_policy()->finalize_cset(target_pause_time_ms);

         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
-        // GC). We also call this after choose_collection_set() to
+        // GC). We also call this after finalize_cset() to
         // ensure that the CSet has been finalized.
         _cm->verify_no_cset_oops(true  /* verify_stacks */,
                                  true  /* verify_enqueued_buffers */,

@@ -4734,7 +4746,7 @@ public:
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -770,7 +770,7 @@ protected:
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
   void g1_process_strong_roots(bool collecting_perm_gen,
-                               SharedHeap::ScanningOption so,
+                               ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,

@@ -1182,6 +1182,12 @@ public:
   bool free_regions_coming() { return _free_regions_coming; }
   void wait_while_free_regions_coming();

+  // Determine whether the given region is one that we are using as an
+  // old GC alloc region.
+  bool is_old_gc_alloc_region(HeapRegion* hr) {
+    return hr == _retained_old_gc_alloc_region;
+  }
+
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.

@@ -1662,8 +1668,6 @@ public:
 public:
   void stop_conc_gc_threads();

-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
-  void check_if_region_is_too_expensive(double predicted_time_ms);
   size_t pending_card_num();
   size_t max_pending_card_num();
   size_t cards_scanned();
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

(This diff is collapsed on the original page and not shown here.)
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

@@ -312,16 +312,13 @@ private:
   double _recorded_non_young_free_cset_time_ms;

   double _sigma;
-  double _expensive_region_limit_ms;

   size_t _rs_lengths_prediction;

   size_t _known_garbage_bytes;
   double _known_garbage_ratio;

-  double sigma() {
-    return _sigma;
-  }
+  double sigma() { return _sigma; }

   // A function that prevents us putting too much stock in small sample
   // sets. Returns a number between 2.0 and 1.0, depending on the number

@@ -491,8 +488,6 @@ public:
     get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   }

-  void check_if_region_is_too_expensive(double predicted_time_ms);
-
   double predict_young_collection_elapsed_time_ms(size_t adjustment);
   double predict_base_elapsed_time_ms(size_t pending_cards);
   double predict_base_elapsed_time_ms(size_t pending_cards,

@@ -707,7 +702,6 @@ private:
   // initial-mark work.
   volatile bool _during_initial_mark_pause;

-  bool _should_revert_to_young_gcs;
   bool _last_young_gc;

   // This set of variables tracks the collector efficiency, in order to

@@ -946,10 +940,17 @@ public:
     return _bytes_copied_during_gc;
   }

+  // Determine whether the next GC should be mixed. Called to determine
+  // whether to start mixed GCs or whether to carry on doing mixed
+  // GCs. The two action strings are used in the ergo output when the
+  // method returns true or false.
+  bool next_gc_should_be_mixed(const char* true_action_str,
+                               const char* false_action_str);
+
   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.
-  void choose_collection_set(double target_pause_time_ms);
+  void finalize_cset(double target_pause_time_ms);

   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp

@@ -131,8 +131,8 @@ public:
   ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"

 // Generates the format string
-#define ergo_format(_action_, _extra_format_) \
-  " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"
+#define ergo_format(_extra_format_) \
+  " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]"

 // Conditionally, prints an ergonomic decision record. _extra_format_
 // is the format string for the optional items we'd like to print

@@ -145,20 +145,21 @@ public:
 // them to the print method. For convenience, we have wrapper macros
 // below which take a specific number of arguments and set the rest to
 // a default value.
-#define ergo_verbose_common(_tag_, _action_, _extra_format_,             \
+#define ergo_verbose_common(_tag_, _action_, _extra_format_,             \
   _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)                        \
-  do {                                                                   \
-    if (G1ErgoVerbose::enabled((_tag_))) {                               \
-      gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_),      \
-                             os::elapsedTime(),                          \
-                             G1ErgoVerbose::to_string((_tag_)),          \
-                             (_arg0_), (_arg1_), (_arg2_),               \
-                             (_arg3_), (_arg4_), (_arg5_));              \
-    }                                                                    \
+  do {                                                                   \
+    if (G1ErgoVerbose::enabled((_tag_))) {                               \
+      gclog_or_tty->print_cr(ergo_format(_extra_format_),                \
+                             os::elapsedTime(),                          \
+                             G1ErgoVerbose::to_string((_tag_)),          \
+                             (_action_),                                 \
+                             (_arg0_), (_arg1_), (_arg2_),               \
+                             (_arg3_), (_arg4_), (_arg5_));              \
+    }                                                                    \
   } while (0)

-#define ergo_verbose(_tag_, _action_)                                    \
+#define ergo_verbose(_tag_, _action_)                                    \
   ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)

 #define ergo_verbose0(_tag_, _action_, _extra_format_) \
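The macro change moves the action string out of the format literal and into a "%s" argument, so the action no longer has to be a string literal pasted together by the preprocessor; it can be chosen at run time, which is what next_gc_should_be_mixed() does with its two action strings. A minimal standalone sketch of the idea (illustrative only, not the HotSpot macro):

#include <cstdio>

// Old style: the action must be a literal, because the preprocessor
// concatenates it into the format string.
#define ERGO_FORMAT_OLD(action, extra) "[G1Ergonomics (%s) " action extra "]\n"
// New style: the action is passed through "%s", so any const char* works.
#define ERGO_FORMAT_NEW(extra)         "[G1Ergonomics (%s) %s" extra "]\n"

int main() {
  bool should_mix = true;
  const char* action = should_mix ? "start mixed GCs"
                                  : "do not start mixed GCs";
  // Only the new style compiles here, since `action` is not a literal.
  std::printf(ERGO_FORMAT_NEW(" (reason: %s)"), "Mixed GCs", action,
              "candidate old regions available");
  return 0;
}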
src/share/vm/gc_implementation/g1/g1_globals.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -297,7 +297,23 @@
                                                                             \
   develop(uintx, G1DefaultMaxNewGenPercent, 80,                             \
           "Percentage (0-100) of the heap size to use as maximum "         \
-          "young gen size.")
+          "young gen size.")                                                \
+                                                                            \
+  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95,                   \
+          "Threshold for regions to be added to the collection set. "      \
+          "Regions with more live bytes that this will not be collected.") \
+                                                                            \
+  develop(uintx, G1OldReclaimableThresholdPercent, 1,                       \
+          "Threshold for the remaining old reclaimable bytes, expressed "  \
+          "as a percentage of the heap size. If the old reclaimable bytes " \
+          "are under this we will not collect them with more mixed GCs.")  \
+                                                                            \
+  develop(uintx, G1MaxMixedGCNum, 4,                                        \
+          "The maximum desired number of mixed GCs after a marking cycle.") \
+                                                                            \
+  develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
+          "An upper bound for the number of old CSet regions expressed "   \
+          "as a percentage of the heap size.")

 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
src/share/vm/gc_implementation/g1/heapRegion.cpp

@@ -387,13 +387,12 @@ void HeapRegion::par_clear() {
   ct_bs->clear(MemRegion(bottom(), end()));
 }

-// <PREDICTION>
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  _gc_efficiency = (double) garbage_bytes() /
-                            g1h->predict_region_elapsed_time_ms(this, false);
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  _gc_efficiency = (double) reclaimable_bytes() /
+                            g1p->predict_region_elapsed_time_ms(this, false);
 }
-// </PREDICTION>

 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(!isHumongous(), "sanity / pre-condition");
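For intuition (the numbers are invented for illustration, not taken from this commit): under the new definition, a region holding 8 MB of reclaimable bytes with a predicted collection time of 4.0 ms has a GC efficiency of 2 MB/ms, so orderRegions() ranks it ahead of a region that reclaims the same 8 MB in a predicted 10 ms (0.8 MB/ms).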
src/share/vm/gc_implementation/g1/heapRegion.hpp

@@ -415,6 +415,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
     return used_at_mark_start_bytes - marked_bytes();
   }

+  // Return the amount of bytes we'll reclaim if we collect this
+  // region. This includes not only the known garbage bytes in the
+  // region but also any unallocated space in it, i.e., [top, end),
+  // since it will also be reclaimed if we collect the region.
+  size_t reclaimable_bytes() {
+    size_t known_live_bytes = live_bytes();
+    assert(known_live_bytes <= capacity(), "sanity");
+    return capacity() - known_live_bytes;
+  }
+
   // An upper bound on the number of live bytes in the region.
   size_t max_live_bytes() { return used() - garbage_bytes(); }

@@ -648,10 +658,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
     init_top_at_mark_start();
   }

-  // <PREDICTION>
   void calc_gc_efficiency(void);
   double gc_efficiency() { return _gc_efficiency;}
-  // </PREDICTION>

   bool is_young() const     { return _young_type != NotYoung; }
   bool is_survivor() const  { return _young_type == Survivor; }
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1042,7 +1042,11 @@ void ParNewGeneration::collect(bool full,
     size_policy->avg_survived()->sample(from()->used());
   }

-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-deccreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);

   SpecializationStats::print();
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -418,25 +418,17 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     gc_count = Universe::heap()->total_collections();

     result = young_gen()->allocate(size);
-
-    // (1) If the requested object is too large to easily fit in the
-    //     young_gen, or
-    // (2) If GC is locked out via GCLocker, young gen is full and
-    //     the need for a GC already signalled to GCLocker (done
-    //     at a safepoint),
-    // ... then, rather than force a safepoint and (a potentially futile)
-    // collection (attempt) for each allocation, try allocation directly
-    // in old_gen. For case (2) above, we may in the future allow
-    // TLAB allocation directly in the old gen.
     if (result != NULL) {
       return result;
     }
-    if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-      result = old_gen()->allocate(size);
-      if (result != NULL) {
-        return result;
-      }
-    }
+
+    // If certain conditions hold, try allocating from the old gen.
+    result = mem_allocate_old_gen(size);
+    if (result != NULL) {
+      return result;
+    }
+
+    // Failed to allocate without a gc.
     if (GC_locker::is_active_and_needs_gc()) {
       // If this thread is not in a jni critical section, we stall
       // the requestor until the critical section has cleared and

@@ -460,7 +452,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     }

     if (result == NULL) {
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);

@@ -523,6 +514,42 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   return result;
 }

+// A "death march" is a series of ultra-slow allocations in which a full gc is
+// done before each allocation, and after the full gc the allocation still
+// cannot be satisfied from the young gen. This routine detects that condition;
+// it should be called after a full gc has been done and the allocation
+// attempted from the young gen. The parameter 'addr' should be the result of
+// that young gen allocation attempt.
+void ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
+  if (addr != NULL) {
+    _death_march_count = 0;  // death march has ended
+  } else if (_death_march_count == 0) {
+    if (should_alloc_in_eden(size)) {
+      _death_march_count = 1;    // death march has started
+    }
+  }
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
+  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
+    // Size is too big for eden, or gc is locked out.
+    return old_gen()->allocate(size);
+  }
+
+  // If a "death march" is in progress, allocate from the old gen a limited
+  // number of times before doing a GC.
+  if (_death_march_count > 0) {
+    if (_death_march_count < 64) {
+      ++_death_march_count;
+      return old_gen()->allocate(size);
+    } else {
+      _death_march_count = 0;
+    }
+  }
+  return NULL;
+}
+
 // Failed allocation policy. Must be called from the VM thread, and
 // only at a safepoint! Note that this method has policy for allocation
 // flow, and NOT collection policy. So we do not check for gc collection

@@ -535,27 +562,22 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

-  size_t mark_sweep_invocation_count = total_invocations();
-
-  // We assume (and assert!) that an allocation at this point will fail
-  // unless we collect.
+  // We assume that allocation in eden will fail unless we collect.

   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  PSScavenge::invoke();
+  const bool invoked_full_gc = PSScavenge::invoke();
   HeapWord* result = young_gen()->allocate(size);

   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
-  if (result == NULL) {
-    // There is some chance the scavenge method decided to invoke mark_sweep.
-    // Don't mark sweep twice if so.
-    if (mark_sweep_invocation_count == total_invocations()) {
-      invoke_full_gc(false);
-      result = young_gen()->allocate(size);
-    }
+  if (result == NULL && !invoked_full_gc) {
+    invoke_full_gc(false);
+    result = young_gen()->allocate(size);
   }

+  death_march_check(result, size);
+
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -64,6 +64,7 @@ class ParallelScavengeHeap : public CollectedHeap {
   // Collection of generations that are adjacent in the
   // space reserved for the heap.
   AdjoiningGenerations* _gens;
+  unsigned int _death_march_count;

   static GCTaskManager* _gc_task_manager;      // The task manager.

@@ -71,8 +72,13 @@ class ParallelScavengeHeap : public CollectedHeap {
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);

+  inline bool should_alloc_in_eden(size_t size) const;
+  inline void death_march_check(HeapWord* const result, size_t size);
+  HeapWord* mem_allocate_old_gen(size_t size);
+
  public:
   ParallelScavengeHeap() : CollectedHeap() {
+    _death_march_count = 0;
     set_alignment(_perm_gen_alignment, intra_heap_alignment());
     set_alignment(_young_gen_alignment, intra_heap_alignment());
     set_alignment(_old_gen_alignment, intra_heap_alignment());
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp

 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -36,6 +36,12 @@ inline size_t ParallelScavengeHeap::total_invocations()
     PSMarkSweep::total_invocations();
 }

+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
+{
+  const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
+  return size < eden_size / 2;
+}
+
 inline void ParallelScavengeHeap::invoke_scavenge()
 {
   PSScavenge::invoke();
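As a concrete illustration (numbers invented for clarity, not from the commit): with an eden capacity of 64 MB, should_alloc_in_eden() only accepts requests smaller than 32 MB, so larger requests are routed by mem_allocate_old_gen() straight to the old generation instead of repeatedly forcing a futile young-gen collection.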
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

@@ -100,12 +100,12 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
+bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");

   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }

   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();

@@ -382,6 +382,8 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }

 bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -78,7 +78,7 @@ class PSMarkSweep : public MarkSweep {
  public:
   static void invoke(bool clear_all_softrefs);
-  static void invoke_no_policy(bool clear_all_softrefs);
+  static bool invoke_no_policy(bool clear_all_softrefs);

   static void initialize();
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -1993,12 +1993,12 @@ bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
+bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");

   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }

   TimeStamp marking_start;

@@ -2248,6 +2248,8 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }

 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1057,7 +1057,7 @@ class PSParallelCompact : AllStatic {
   }

   static void invoke(bool maximum_heap_compaction);
-  static void invoke_no_policy(bool maximum_heap_compaction);
+  static bool invoke_no_policy(bool maximum_heap_compaction);

   static void post_initialize();
   // Perform initialization for PSParallelCompact that requires
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
浏览文件 @
68cee6b2
/*
* Copyright (c) 2002, 201
1
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 201
2
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -247,167 +247,6 @@ void PSPromotionManager::flush_labs() {
}
}
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.
        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}
template <class T>
void PSPromotionManager::process_array_chunk_work(oop obj, int start, int end) {
...
...
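The allocation strategy in copy_to_survivor_space above is worth calling out: try the thread-local PLAB first, allocate objects larger than half a PLAB directly with a shared CAS allocation, otherwise flush the buffer and refill it. Below is a minimal, self-contained C++ sketch of that decision only; all names (SharedSpace, LocalBuffer, kBufferWords) are invented for illustration and are not HotSpot code.

#include <atomic>
#include <cstddef>
#include <cstdio>

static const std::size_t kAllocFailed = ~static_cast<std::size_t>(0);

// A shared space that all GC threads allocate from with a CAS bump pointer.
struct SharedSpace {
  std::atomic<std::size_t> top{0};
  std::size_t limit = 1u << 20;   // capacity in words

  std::size_t cas_allocate(std::size_t words) {
    std::size_t old_top = top.load();
    do {
      if (old_top + words > limit) return kAllocFailed;
    } while (!top.compare_exchange_weak(old_top, old_top + words));
    return old_top;                // start of the newly claimed words
  }
};

// A thread-local buffer in the spirit of a PLAB: bump-allocate inside [cur, lim).
struct LocalBuffer {
  std::size_t cur = 0, lim = 0;
  static const std::size_t kBufferWords = 256;

  std::size_t allocate(SharedSpace& space, std::size_t words) {
    if (cur + words <= lim) {            // fast path: no atomics at all
      std::size_t result = cur;
      cur += words;
      return result;
    }
    if (words > kBufferWords / 2) {      // big object: allocate it directly
      return space.cas_allocate(words);
    }
    // Flush and refill: abandon the tail of the old buffer, grab a new one.
    std::size_t base = space.cas_allocate(kBufferWords);
    if (base == kAllocFailed) return kAllocFailed;
    cur = base;
    lim = base + kBufferWords;
    std::size_t result = cur;
    cur += words;
    return result;
  }
};

int main() {
  SharedSpace space;
  LocalBuffer lab;
  std::printf("small obj at %zu\n", lab.allocate(space, 8));    // refill, then bump
  std::printf("small obj at %zu\n", lab.allocate(space, 8));    // pure local bump
  std::printf("large obj at %zu\n", lab.allocate(space, 300));  // direct shared CAS
  return 0;
}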
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -171,7 +171,7 @@ class PSPromotionManager : public CHeapObj {
   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

   // Promotion methods
-  oop copy_to_survivor_space(oop o);
+  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   oop oop_promotion_failed(oop obj, markOop obj_mark);

   void reset();
...
...
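The only interface change in this header is that copy_to_survivor_space gains a bool template parameter. A quick stand-alone sketch (not HotSpot code; copy_sketch and its arguments are made up) of why a compile-time bool is preferred over a runtime argument on a hot path: each instantiation is compiled with the flag as a constant, so the branch disappears entirely from the generated code.

#include <cstdio>

// promote_immediately is a compile-time constant, so the "try to-space first"
// branch is removed from the <true> instantiation by the compiler.
template <bool promote_immediately>
void copy_sketch(int age, int tenuring_threshold) {
  if (!promote_immediately && age < tenuring_threshold) {
    std::puts("allocate in to-space (young generation)");
    return;
  }
  std::puts("allocate directly in the old generation");
}

int main() {
  copy_sketch<false>(3, 15);  // ordinary scavenge: young objects stay young
  copy_sketch<true>(3, 15);   // immediate promotion: skip to-space entirely
  return 0;
}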
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -61,6 +61,170 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   claim_or_forward_internal_depth(p);
 }
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the object's age, MT safe.
      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.
        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}
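The correctness of the whole copy hinges on the forwarding CAS: every racing GC thread copies the object into its own buffer, but only the thread whose compare-and-swap installs the forwarding pointer wins; the losers discard their copy and adopt the winner's forwardee. Here is a minimal, self-contained C++ sketch of that pattern (the Obj type and function names are illustrative, not HotSpot's):

#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};  // stands in for the mark-word forwarding state
  int payload = 0;
};

// Each GC thread calls this; exactly one copy "wins" per object.
Obj* copy_with_cas(Obj* from, Obj* my_copy) {
  my_copy->payload = from->payload;           // speculative copy into this thread's buffer
  Obj* expected = nullptr;
  if (from->forwardee.compare_exchange_strong(expected, my_copy)) {
    return my_copy;                           // we won the race: my_copy is the forwardee
  }
  // We lost: another thread installed its copy first. Abandon my_copy (HotSpot
  // either unallocates it from the PLAB or fills it with a dummy object) and
  // use the winner's copy instead.
  return expected;                            // compare_exchange wrote the winner here
}

int main() {
  Obj from;   from.payload = 42;
  Obj copy_a, copy_b;
  Obj* r1 = copy_with_cas(&from, &copy_a);    // wins: returns &copy_a
  Obj* r2 = copy_with_cas(&from, &copy_b);    // loses: also returns &copy_a
  std::printf("same forwardee: %d\n", r1 == r2);
  return 0;
}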
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
...
...
@@ -69,9 +233,9 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   } else {
     if (p.is_narrow()) {
       assert(UseCompressedOops, "Error");
-      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
     } else {
-      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
     }
   }
 }
...
...
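The is_oop_masked() / mask_chunked_array_oop() pair used by the chunked-array path is a pointer-tagging trick: a low bit that is never set in a properly aligned pointer marks a queue entry as "chunked array task" rather than "ordinary oop location". A small stand-alone sketch of that idea with made-up names (not the HotSpot encoding, which also packs a chunk index):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Aligned pointers have their low bits clear, so bit 0 is free to use as a tag.
const std::uintptr_t kChunkTag = 0x1;

void* mask_chunked(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(p) | kChunkTag);
}

bool is_masked(void* p) {
  return (reinterpret_cast<std::uintptr_t>(p) & kChunkTag) != 0;
}

void* unmask(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(p) & ~kChunkTag);
}

int main() {
  alignas(8) static int big_array[16];
  void* task = mask_chunked(big_array);   // push side: queue entry for a chunked array
  assert(is_masked(task));                // pop side: recognize the tag...
  assert(unmask(task) == big_array);      // ...and recover the original pointer
  std::puts("tagged task round-trips correctly");
  return 0;
}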
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
...
...
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
...
...
@@ -100,7 +101,7 @@ public:
     // Weak refs may be visited more than once.
     if (PSScavenge::should_scavenge(p, _to_space)) {
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
     }
   }
   virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
...
...
@@ -214,36 +215,41 @@ void PSRefProcTaskExecutor::execute(EnqueueTask& task)
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke() {
+bool PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*) VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;

-  bool scavenge_was_done = PSScavenge::invoke_no_policy();
+  const bool scavenge_done = PSScavenge::invoke_no_policy();
+  const bool need_full_gc = !scavenge_done ||
+    policy->should_full_GC(heap->old_gen()->free_in_bytes());
+  bool full_gc_done = false;

-  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-  if (UsePerfData)
-    counters->update_full_follows_scavenge(0);
-  if (!scavenge_was_done ||
-      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-    if (UsePerfData)
-      counters->update_full_follows_scavenge(full_follows_scavenge);
+  if (UsePerfData) {
+    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
+    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
+    counters->update_full_follows_scavenge(ffs_val);
+  }
+
+  if (need_full_gc) {
     GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
     CollectorPolicy* cp = heap->collector_policy();
     const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

     if (UseParallelOldGC) {
-      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
     } else {
-      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
+
+  return full_gc_done;
 }
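IsGCActiveMark and GCCauseSetter in invoke() are RAII scope guards: constructing one flips a piece of global state, and the destructor restores it on every exit path, including early returns. A minimal sketch of that pattern with invented names (not the HotSpot classes themselves):

#include <cassert>

// Global state a collection toggles while it runs (illustrative only).
static bool g_gc_active = false;

class GCActiveMarkSketch {
 public:
  GCActiveMarkSketch()  { assert(!g_gc_active); g_gc_active = true; }
  ~GCActiveMarkSketch() { g_gc_active = false; }   // restored on every exit path
};

bool do_collection_sketch(bool trigger_full_gc) {
  GCActiveMarkSketch mark;          // "not reentrant" is enforced by the assert
  if (!trigger_full_gc) {
    return false;                   // early return: the destructor still runs
  }
  // ... full collection work would go here ...
  return true;
}

int main() {
  do_collection_sketch(false);
  assert(!g_gc_active);             // the guard cleaned up after the early return
  do_collection_sketch(true);
  assert(!g_gc_active);
  return 0;
}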
// This method contains no policy. You should probably
...
...
@@ -602,6 +608,8 @@ bool PSScavenge::invoke_no_policy() {
   NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

   CodeCache::prune_scavenge_root_nmethods();

   // Re-verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyAfterGC) {
...
...
src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -117,10 +117,9 @@ class PSScavenge: AllStatic {
   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();

-  // Scavenge entry point
-  static void invoke();
-  // Return true is a collection was done. Return
-  // false if the collection was skipped.
+  // Scavenge entry point. This may invoke a full gc; return true if so.
+  static bool invoke();
+  // Return true if a collection was done; false otherwise.
   static bool invoke_no_policy();

   // If an attempt to promote fails, this method is invoked
...
...
@@ -135,7 +134,8 @@ class PSScavenge: AllStatic {
   template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
   template <class T> static inline bool should_scavenge(T* p, bool check_to_space);

-  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
+  template <class T, bool promote_immediately>
+  inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);

   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
...
...
src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -28,6 +28,7 @@
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
inline void PSScavenge::save_to_space_top_before_gc() {
...
...
@@ -65,7 +66,7 @@ inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
-template <class T>
+template <class T, bool promote_immediately>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm, T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
...
...
@@ -73,7 +74,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
   oop o = oopDesc::load_decode_heap_oop_not_null(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
-        : pm->copy_to_survivor_space(o);
+        : pm->copy_to_survivor_space<promote_immediately>(o);
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);

   // We cannot mark without test, as some code passes us pointers
...
...
@@ -86,7 +87,8 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
   }
 }

-class PSScavengeRootsClosure: public OopClosure {
+template <bool promote_immediately>
+class PSRootsClosure: public OopClosure {
  private:
   PSPromotionManager* _promotion_manager;
...
...
@@ -94,13 +96,16 @@ class PSScavengeRootsClosure: public OopClosure {
   template <class T> void do_oop_work(T* p) {
     if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
     }
   }

  public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
-  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+  PSRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSRootsClosure::do_oop_work(p); }
 };

+typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
+typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
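For readers less familiar with the HotSpot closure idiom shown above: an OopClosure subclass exposes two virtual do_oop overloads (full-width and compressed oops) that both forward to one templated worker, and here the class itself is additionally parameterized by promote_immediately, with typedefs restoring the old name. A small stand-alone sketch of that shape, with simplified, invented types rather than real oops:

#include <cstdio>

// Stand-ins for oop / narrowOop and the visitor base class.
using oop_t = long*;
using narrow_t = int*;

struct OopClosureSketch {
  virtual void do_oop(oop_t* p) = 0;
  virtual void do_oop(narrow_t* p) = 0;
  virtual ~OopClosureSketch() = default;
};

template <bool promote_immediately>
class RootsClosureSketch : public OopClosureSketch {
 private:
  // One templated worker handles both pointer widths.
  template <class T> void do_oop_work(T* p) {
    std::printf("visit %p, promote_immediately=%d\n",
                static_cast<void*>(p), promote_immediately);
  }
 public:
  void do_oop(oop_t* p) override    { do_oop_work(p); }
  void do_oop(narrow_t* p) override { do_oop_work(p); }
};

// The old names survive as typedefs over the two instantiations.
typedef RootsClosureSketch<false> ScavengeRootsClosureSketch;
typedef RootsClosureSketch<true>  PromoteRootsClosureSketch;

int main() {
  oop_t slot = nullptr;
  ScavengeRootsClosureSketch scavenge;
  PromoteRootsClosureSketch promote;
  scavenge.do_oop(&slot);
  promote.do_oop(&slot);
  return 0;
}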
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -51,6 +51,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
+  PSPromoteRootsClosure roots_to_old_closure(pm);

   switch (_root_type) {
     case universe:
...
...
@@ -91,7 +92,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
     case code_cache:
     {
-      CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+      CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
       CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
     }
     break;
...
...
src/share/vm/memory/defNewGeneration.cpp
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -655,7 +655,12 @@ void DefNewGeneration::collect(bool full,
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
   SpecializationStats::print();

-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 }
class RemoveForwardPointerClosure: public ObjectClosure {
...
...
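The switch above (repeated in genMarkSweep.cpp below) from os::javaTimeMillis() to os::javaTimeNanos() / NANOSECS_PER_MILLISEC is about clock monotonicity: wall-clock time can jump backwards after an NTP adjustment or a manual clock change, while the nanosecond timer never decreases, so deltas against the last-GC timestamp stay non-negative. A small stand-alone C++ sketch of the same idea using std::chrono (the function names and structure are illustrative, not HotSpot's):

#include <chrono>
#include <cstdio>

// Milliseconds since a fixed origin, guaranteed never to decrease,
// analogous to os::javaTimeNanos() / NANOSECS_PER_MILLISEC.
long long monotonic_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

// Wall-clock milliseconds, analogous to os::javaTimeMillis(); this value can
// go backwards if the system clock is adjusted, producing "time warp" deltas.
long long wall_clock_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}

int main() {
  long long last_gc = monotonic_millis();
  // ... a collection happens some time later ...
  long long now = monotonic_millis();
  long long since_last_gc = now - last_gc;   // never negative with the steady clock
  std::printf("ms since last gc: %lld (wall clock reads %lld)\n",
              since_last_gc, wall_clock_millis());
  return 0;
}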
src/share/vm/memory/genMarkSweep.cpp
...
...
@@ -176,7 +176,11 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
   // Update time of last gc for all generations we collected
   // (which currently is all the generations in the heap).
-  gch->update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  gch->update_time_of_last_gc(now);
 }
void GenMarkSweep::allocate_stacks() {
...
...