openanolis / dragonwell8_hotspot · Commit 0ce87a38
Commit 0ce87a38
Authored on Mar 27, 2008 by tonyp
Merge; parents: 0838dcdc, f903d5a1

Showing 4 changed files with 198 additions and 109 deletions (+198 −109)
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp          +152 −87
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp          +26 −19
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp   +1 −1
src/share/vm/runtime/globals.hpp                                                              +19 −2
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -225,6 +225,34 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   assert(_dilatation_factor >= 1.0, "from previous assert");
 }
 
+// The field "_initiating_occupancy" represents the occupancy percentage
+// at which we trigger a new collection cycle.  Unless explicitly specified
+// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
+// is calculated by:
+//
+//   Let "f" be MinHeapFreeRatio in
+//
+//    _intiating_occupancy = 100-f +
+//                           f * (CMSTrigger[Perm]Ratio/100)
+//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
+//
+// That is, if we assume the heap is at its desired maximum occupancy at the
+// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
+// space be allocated before initiating a new collection cycle.
+//
+void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
+  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
+  if (io >= 0) {
+    _initiating_occupancy = (double)io / 100.0;
+  } else {
+    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
+                             (double)(tr * MinHeapFreeRatio) / 100.0)
+                            / 100.0;
+  }
+}
+
 void ConcurrentMarkSweepGeneration::ref_processor_init() {
   assert(collector() != NULL, "no collector");
   collector()->ref_processor_init();
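The arithmetic in the new comment is easy to sanity-check. Below is a minimal standalone sketch of the init_initiating_occupancy() computation, for illustration only (it is not part of the commit); MinHeapFreeRatio = 40 and tr = 80 are assumed example values:

```cpp
// Standalone sketch of the init_initiating_occupancy() computation above.
// MinHeapFreeRatio = 40 and tr = 80 are assumed example values.
#include <cstdio>

static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  if (io >= 0) {
    return (double)io / 100.0;  // an explicit fraction wins
  }
  // Assume the heap is at its desired maximum occupancy (100 - f) after a
  // collection, and let tr% of the free space f be allocated before the
  // next cycle starts.
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  printf("%f\n", initiating_occupancy(-1, 80, 40)); // 0.920000: (60 + 32) / 100
  printf("%f\n", initiating_occupancy(65, 80, 40)); // 0.650000: io taken verbatim
  return 0;
}
```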
@@ -520,8 +548,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
   _collector_policy(cp),
-  _unload_classes(false),
-  _unloaded_classes_last_cycle(false),
+  _should_unload_classes(false),
+  _concurrent_cycles_since_last_unload(0),
   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 {
   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
@@ -642,26 +670,11 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
     }
   }
 
-  // "initiatingOccupancy" is the occupancy ratio at which we trigger
-  // a new collection cycle.  Unless explicitly specified via
-  // CMSTriggerRatio, it is calculated by:
-  //   Let "f" be MinHeapFreeRatio in
-  //
-  //    intiatingOccupancy = 100-f +
-  //                         f * (CMSTriggerRatio/100)
-  // That is, if we assume the heap is at its desired maximum occupancy at the
-  // end of a collection, we let CMSTriggerRatio of the (purported) free
-  // space be allocated before initiating a new collection cycle.
-  if (CMSInitiatingOccupancyFraction > 0) {
-    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
-  } else {
-    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
-                            (double)(CMSTriggerRatio * MinHeapFreeRatio) / 100.0)
-                           / 100.0;
-  }
+  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
+  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction,
+                                      CMSTriggerPermRatio);
 
   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
+  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                          /(double)100;
 
   _full_gcs_since_conc_gc = 0;
@@ -1413,7 +1426,8 @@ bool CMSCollector::shouldConcurrentCollect() {
     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
-    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
+    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
   }
   // ------------------------------------------------------------------
@@ -1446,22 +1460,36 @@ bool CMSCollector::shouldConcurrentCollect() {
   // old gen want a collection cycle started. Each may use
   // an appropriate criterion for making this decision.
   // XXX We need to make sure that the gen expansion
-  // criterion dovetails well with this.
-  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
+  // criterion dovetails well with this. XXX NEED TO FIX THIS
+  if (_cmsGen->should_concurrent_collect()) {
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print_cr("CMS old gen initiated");
     }
     return true;
   }
 
-  if (cms_should_unload_classes() &&
-      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMS perm gen initiated");
+  // We start a collection if we believe an incremental collection may fail;
+  // this is not likely to be productive in practice because it's probably too
+  // late anyway.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->collector_policy()->is_two_generation_policy(),
+         "You may want to check the correctness of the following");
+  if (gch->incremental_collection_will_fail()) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
+    }
+    return true;
+  }
+
+  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
+    bool res = update_should_unload_classes();
+    if (res) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print_cr("CMS perm gen initiated");
+      }
+      return true;
     }
-    return true;
   }
   return false;
 }
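Read top to bottom, the rewritten check is a cascade: the old gen's own criteria first, then the incremental-collection escape hatch, and only then the perm gen, which now triggers a cycle only if recomputing the unloading decision says classes will actually be unloaded. A condensed sketch of that ordering (the boolean parameters stand in for the real CMS queries; this is an illustration, not the actual API):

```cpp
// Condensed sketch of the CMSCollector::shouldConcurrentCollect() cascade.
#include <cstdio>

bool should_concurrent_collect_sketch(bool old_gen_wants_it,
                                      bool incremental_will_fail,
                                      bool class_unloading_enabled,
                                      bool perm_gen_wants_it,
                                      bool would_unload_classes) {
  if (old_gen_wants_it)      return true; // occupancy/expansion/space criteria
  if (incremental_will_fail) return true; // probably too late, but collect anyway
  if (class_unloading_enabled && perm_gen_wants_it) {
    return would_unload_classes;          // perm gen counts only if we will unload
  }
  return false;
}

int main() {
  // Perm gen pressure alone no longer starts a cycle unless unloading is on.
  printf("%d\n", should_concurrent_collect_sketch(false, false, false, true, true)); // 0
  printf("%d\n", should_concurrent_collect_sketch(false, false, true,  true, true)); // 1
  return 0;
}
```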
@@ -1471,32 +1499,36 @@ void CMSCollector::clear_expansion_cause() {
   _permGen->clear_expansion_cause();
 }
 
-bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
-  double initiatingOccupancy) {
-  // We should be conservative in starting a collection cycle.  To
-  // start too eagerly runs the risk of collecting too often in the
-  // extreme.  To collect too rarely falls back on full collections,
-  // which works, even if not optimum in terms of concurrent work.
-  // As a work around for too eagerly collecting, use the flag
-  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
-  // giving the user an easily understandable way of controlling the
-  // collections.
-  // We want to start a new collection cycle if any of the following
-  // conditions hold:
-  // . our current occupancy exceeds the initiating occupancy, or
-  // . we recently needed to expand and have not since that expansion,
-  //   collected, or
-  // . we are not using adaptive free lists and linear allocation is
-  //   going to fail, or
-  // . (for old gen) incremental collection has already failed or
-  //   may soon fail in the near future as we may not be able to absorb
-  //   promotions.
-  assert_lock_strong(freelistLock());
+// We should be conservative in starting a collection cycle.  To
+// start too eagerly runs the risk of collecting too often in the
+// extreme.  To collect too rarely falls back on full collections,
+// which works, even if not optimum in terms of concurrent work.
+// As a work around for too eagerly collecting, use the flag
+// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
+// giving the user an easily understandable way of controlling the
+// collections.
+// We want to start a new collection cycle if any of the following
+// conditions hold:
+// . our current occupancy exceeds the configured initiating occupancy
+//   for this generation, or
+// . we recently needed to expand this space and have not, since that
+//   expansion, done a collection of this generation, or
+// . the underlying space believes that it may be a good idea to initiate
+//   a concurrent collection (this may be based on criteria such as the
+//   following: the space uses linear allocation and linear allocation is
+//   going to fail, or there is believed to be excessive fragmentation in
+//   the generation, etc... or ...
+// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
+//   the case of the old generation, not the perm generation; see CR 6543076):
+//   we may be approaching a point at which allocation requests may fail because
+//   we will be out of sufficient free space given allocation rate estimates.]
+bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
 
-  if (occupancy() > initiatingOccupancy) {
+  assert_lock_strong(freelistLock());
+  if (occupancy() > initiating_occupancy()) {
     if (PrintGCDetails && Verbose) {
       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
-        short_name(), occupancy(), initiatingOccupancy);
+        short_name(), occupancy(), initiating_occupancy());
     }
     return true;
   }
@@ -1510,20 +1542,9 @@ bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
     }
     return true;
   }
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
-        short_name());
-    }
-    return true;
-  }
-
-  if (!_cmsSpace->adaptive_freelists() &&
-      _cmsSpace->linearAllocationWouldFail()) {
+  if (_cmsSpace->should_concurrent_collect()) {
     if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because of linAB ",
+      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
         short_name());
     }
     return true;
@@ -1970,8 +1991,9 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
          "Should have been NULL'd before baton was passed");
   reset(false /* == !asynch */);
   _cmsGen->reset_after_compaction();
+  _concurrent_cycles_since_last_unload = 0;
 
-  if (verifying() && !cms_should_unload_classes()) {
+  if (verifying() && !should_unload_classes()) {
     perm_gen_verify_bit_map()->clear_all();
   }
@@ -2098,6 +2120,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
   {
     bool safepoint_check = Mutex::_no_safepoint_check_flag;
     MutexLockerEx hl(Heap_lock, safepoint_check);
+    FreelistLocker fll(this);
     MutexLockerEx x(CGC_lock, safepoint_check);
     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
       // The foreground collector is active or we're
@@ -2112,13 +2135,9 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
       // a new cycle.
       clear_expansion_cause();
     }
-    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
-    // This controls class unloading in response to an explicit gc request.
-    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
-    // we will unload classes even if CMSClassUnloadingEnabled is not set.
-    // See CR 6541037 and related CRs.
-    _unload_classes = _full_gc_requested  // ... for this cycle
-                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
+    // Decide if we want to enable class unloading as part of the
+    // ensuing concurrent GC cycle.
+    update_should_unload_classes();
     _full_gc_requested = false;           // acks all outstanding full gc requests
     // Signal that we are about to start a collection
     gch->increment_total_full_collections();  // ... starting a collection cycle
@@ -3047,21 +3066,62 @@ void CMSCollector::verify_overflow_empty() const {
 }
 #endif // PRODUCT
 
+// Decide if we want to enable class unloading as part of the
+// ensuing concurrent GC cycle. We will collect the perm gen and
+// unload classes if it's the case that:
+// (1) an explicit gc request has been made and the flag
+//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
+// (2) (a) class unloading is enabled at the command line, and
+//     (b) (i)   perm gen threshold has been crossed, or
+//         (ii)  old gen is getting really full, or
+//         (iii) the previous N CMS collections did not collect the
+//               perm gen
+// NOTE: Provided there is no change in the state of the heap between
+// calls to this method, it should have idempotent results. Moreover,
+// its results should be monotonically increasing (i.e. going from 0 to 1,
+// but not 1 to 0) between successive calls between which the heap was
+// not collected. For the implementation below, it must thus rely on
+// the property that concurrent_cycles_since_last_unload()
+// will not decrease unless a collection cycle happened and that
+// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
+// themselves also monotonic in that sense. See check_monotonicity()
+// below.
+bool CMSCollector::update_should_unload_classes() {
+  _should_unload_classes = false;
+  // Condition 1 above
+  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
+    _should_unload_classes = true;
+  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
+    // Disjuncts 2.b.(i,ii,iii) above
+    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
+                              CMSClassUnloadingMaxInterval)
+                           || _permGen->should_concurrent_collect()
+                           || _cmsGen->is_too_full();
+  }
+  return _should_unload_classes;
+}
+
+bool ConcurrentMarkSweepGeneration::is_too_full() const {
+  bool res = should_concurrent_collect();
+  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
+  return res;
+}
+
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                               || VerifyBeforeExit;
   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
                               |  SharedHeap::SO_CodeCache;
 
-  if (cms_should_unload_classes()) {   // Should unload classes this cycle
+  if (should_unload_classes()) {       // Should unload classes this cycle
     remove_root_scanning_option(rso);  // Shrink the root set appropriately
     set_verifying(should_verify);      // Set verification state for this cycle
     return;                            // Nothing else needs to be done at this time
   }
 
   // Not unloading classes this cycle
-  assert(!cms_should_unload_classes(), "Inconsitency!");
-  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
+  assert(!should_unload_classes(), "Inconsitency!");
+  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
     // We were not verifying, or we _were_ unloading classes in the last cycle,
     // AND some verification options are enabled this cycle; in this case,
     // we must make sure that the deadness map is allocated if not already so,
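Stated as a predicate, the new policy is: unload when an explicit concurrent full gc asks for it; otherwise, only under CMSClassUnloadingEnabled, unload when the no-unload interval has expired, the perm gen wants collecting, or the old gen is too full. A small sketch of that decision (the flag values passed in main() are assumptions for the example):

```cpp
// Sketch of the update_should_unload_classes() policy above.
#include <cstdio>

bool should_unload(bool full_gc_requested,
                   bool explicit_gc_unloads,     // ExplicitGCInvokesConcurrentAndUnloadsClasses
                   bool class_unloading_enabled, // CMSClassUnloadingEnabled
                   unsigned cycles_since_unload,
                   unsigned max_interval,        // CMSClassUnloadingMaxInterval
                   bool perm_wants_collect,
                   bool old_gen_too_full) {
  if (full_gc_requested && explicit_gc_unloads) return true; // condition (1)
  if (class_unloading_enabled) {                             // condition (2a)
    return cycles_since_unload >= max_interval               // (2b.iii)
        || perm_wants_collect                                // (2b.i)
        || old_gen_too_full;                                 // (2b.ii)
  }
  return false;
}

int main() {
  // With the default CMSClassUnloadingMaxInterval of 0, the interval disjunct
  // (0 >= 0) holds every cycle, so enabling class unloading unloads each cycle.
  printf("%d\n", should_unload(false, false, true, 0, 0, false, false)); // 1
  printf("%d\n", should_unload(false, false, false, 5, 0, true, true));  // 0
  return 0;
}
```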
@@ -4693,7 +4753,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     CodeCache::gc_prologue();
   }
   assert(haveFreelistLocks(), "must have free list locks");
@@ -4753,7 +4813,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     CodeCache::gc_epilogue();
   }
@@ -5623,7 +5683,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
     verify_work_stacks_empty();
   }
 
-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     {
       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
@@ -5726,7 +5786,7 @@ void CMSCollector::sweep(bool asynch) {
   // this cycle, we preserve the perm gen object "deadness" information
   // in the perm_gen_verify_bit_map. In order to do that we traverse
   // all blocks in perm gen and mark all dead objects.
-  if (verifying() && !cms_should_unload_classes()) {
+  if (verifying() && !should_unload_classes()) {
     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
            "Should have already been allocated");
     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
@@ -5753,7 +5813,7 @@ void CMSCollector::sweep(bool asynch) {
   }
 
   // Now repeat for perm gen
-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                              bitMapLock());
     sweepWork(_permGen, asynch);
@@ -5775,7 +5835,7 @@ void CMSCollector::sweep(bool asynch) {
     // already have needed locks
     sweepWork(_cmsGen, asynch);
 
-    if (cms_should_unload_classes()) {
+    if (should_unload_classes()) {
       sweepWork(_permGen, asynch);
     }
   // Update heap occupancy information which is used as
@@ -5937,6 +5997,11 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
   }
   gen->cmsSpace()->sweep_completed();
   gen->cmsSpace()->endSweepFLCensus(sweepCount());
+  if (should_unload_classes()) {              // unloaded classes this cycle,
+    _concurrent_cycles_since_last_unload = 0; // ... reset count
+  } else {                                    // did not unload classes,
+    _concurrent_cycles_since_last_unload++;   // ... increment count
+  }
 }
 
 // Reset CMS data structures (for now just the marking bit map)
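This counter is what gives unloaded_classes_last_cycle() in the header its meaning: the count is zeroed by a sweep that unloaded classes and incremented otherwise, so it is zero exactly when the most recent cycle unloaded. A toy simulation of the bookkeeping (the per-cycle decisions are made up):

```cpp
// Toy simulation of the cycle bookkeeping added to sweepWork() above.
#include <cstdio>

int main() {
  unsigned cycles_since_last_unload = 0;
  const bool unload_schedule[] = { true, false, false, true }; // made-up decisions

  for (bool unloaded : unload_schedule) {
    if (unloaded) {
      cycles_since_last_unload = 0; // unloaded classes this cycle: reset
    } else {
      cycles_since_last_unload++;   // did not unload: increment
    }
    printf("count=%u unloaded_last_cycle=%d\n",
           cycles_since_last_unload, cycles_since_last_unload == 0);
  }
  return 0;
}
```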
@@ -7194,7 +7259,7 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
   _revisitStack(revisitStack),
   _finger(finger),
   _parent(parent),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 { }
 
 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@@ -7217,7 +7282,7 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   _finger(finger),
   _global_finger_addr(global_finger_addr),
   _parent(parent),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 { }
@@ -7360,7 +7425,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
   _mark_stack(mark_stack),
   _revisit_stack(revisit_stack),
   _concurrent_precleaning(concurrent_precleaning),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7422,7 +7487,7 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
   _bit_map(bit_map),
   _work_queue(work_queue),
   _revisit_stack(revisit_stack),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7944,7 +8009,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
   #ifdef DEBUG
     if (oop(addr)->klass() != NULL &&
-        (!_collector->cms_should_unload_classes()
+        (!_collector->should_unload_classes()
          || oop(addr)->is_parsable())) {
       // Ignore mark word because we are running concurrent with mutators
       assert(oop(addr)->is_oop(true), "live block should be an oop");
@@ -7957,7 +8022,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
   } else {
     // This should be an initialized object that's alive.
     assert(oop(addr)->klass() != NULL &&
-           (!_collector->cms_should_unload_classes()
+           (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
     // Ignore mark word because we are running concurrent with mutators
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -535,13 +535,16 @@ class CMSCollector: public CHeapObj {
   // In support of ExplicitGCInvokesConcurrent
   static bool _full_gc_requested;
   unsigned int _collection_count_start;
   // Should we unload classes this concurrent cycle?
-  // Set in response to a concurrent full gc request.
-  bool _unload_classes;
-  bool _unloaded_classes_last_cycle;
+  bool _should_unload_classes;
+  unsigned int _concurrent_cycles_since_last_unload;
+  unsigned int concurrent_cycles_since_last_unload() const {
+    return _concurrent_cycles_since_last_unload;
+  }
   // Did we (allow) unload classes in the previous concurrent cycle?
-  bool cms_unloaded_classes_last_cycle() const {
-    return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
+  bool unloaded_classes_last_cycle() const {
+    return concurrent_cycles_since_last_unload() == 0;
   }
 
   // Verification support
@@ -651,8 +654,6 @@ class CMSCollector: public CHeapObj {
   // number of full gc's since the last concurrent gc.
   uint  _full_gcs_since_conc_gc;
 
-  // if occupancy exceeds this, start a new gc cycle
-  double _initiatingOccupancy;
   // occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
@@ -825,7 +826,6 @@ class CMSCollector: public CHeapObj {
   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
   static CollectorState abstract_state() { return _collectorState;  }
-  double initiatingOccupancy() const { return _initiatingOccupancy; }
 
   bool should_abort_preclean() const; // Whether preclean should be aborted.
   size_t get_eden_used() const;
@@ -849,11 +849,10 @@ class CMSCollector: public CHeapObj {
   // In support of ExplicitGCInvokesConcurrent
   static void request_full_gc(unsigned int full_gc_count);
   // Should we unload classes in a particular concurrent cycle?
-  bool cms_should_unload_classes() const {
-    assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses,
-           "Inconsistency; see CR 6541037");
-    return _unload_classes || CMSClassUnloadingEnabled;
+  bool should_unload_classes() const {
+    return _should_unload_classes;
   }
+  bool update_should_unload_classes();
 
   void direct_allocated(HeapWord* start, size_t size);
@@ -1022,6 +1021,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
     _incremental_collection_failed = false;
   }
 
+  // accessors
+  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
+  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
+
  private:
   // For parallel young-gen GC support.
   CMSParGCThreadState** _par_gc_thread_states;
@@ -1029,10 +1032,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Reason generation was expanded
   CMSExpansionCause::Cause _expansion_cause;
 
-  // accessors
-  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
-  CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
-
   // In support of MinChunkSize being larger than min object size
   const double _dilatation_factor;
@@ -1045,6 +1044,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   CollectionTypes _debug_collection_type;
 
+  // Fraction of current occupancy at which to start a CMS collection which
+  // will collect this generation (at least).
+  double _initiating_occupancy;
+
  protected:
   // Grow generation by specified size (returns false if unable to grow)
   bool grow_by(size_t bytes);
@@ -1060,6 +1063,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // space.
   size_t max_available() const;
 
+  // getter and initializer for _initiating_occupancy field.
+  double initiating_occupancy() const { return _initiating_occupancy; }
+  void   init_initiating_occupancy(intx io, intx tr);
+
  public:
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                 int level, CardTableRS* ct,
@@ -1103,7 +1110,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   size_t capacity() const;
   size_t used() const;
   size_t free() const;
-  double occupancy() { return ((double)used())/((double)capacity()); }
+  double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
@@ -1158,8 +1165,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
                     bool younger_handles_promotion_failure) const;
 
   bool should_collect(bool full, size_t size, bool tlab);
-    // XXXPERM
-  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
+  virtual bool should_concurrent_collect() const;
+  virtual bool is_too_full() const;
   void collect(bool   full,
                bool   clear_all_soft_refs,
                size_t size,
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
@@ -267,7 +267,7 @@ inline bool CMSCollector::is_dead_obj(oop obj) const {
          (_permGen->cmsSpace()->is_in_reserved(addr)
           && _permGen->cmsSpace()->block_is_obj(addr)),
          "must be object");
-  return cms_should_unload_classes() &&
+  return should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
 }
src/share/vm/runtime/globals.hpp
@@ -1319,6 +1319,10 @@ class CommandLineFlags {
   product(bool, CMSClassUnloadingEnabled, false,                            \
           "Whether class unloading enabled when using CMS GC")              \
                                                                             \
+  product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
+          "When CMS class unloading is enabled, the maximum CMS cycle count"\
+          " for which classes may not be unloaded")                         \
+                                                                            \
   product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
           "Compact when asked to collect CMS gen with clear_all_soft_refs") \
@@ -1504,17 +1508,30 @@ class CommandLineFlags {
           "Percentage of MinHeapFreeRatio in CMS generation that is "       \
           " allocated before a CMS collection cycle commences")             \
                                                                             \
-  product(intx, CMSBootstrapOccupancy, 50,                                  \
+  product(intx, CMSTriggerPermRatio, 80,                                    \
+          "Percentage of MinHeapFreeRatio in the CMS perm generation that"  \
+          " is allocated before a CMS collection cycle commences, that "    \
+          " also collects the perm generation")                             \
+                                                                            \
+  product(uintx, CMSBootstrapOccupancy, 50,                                 \
           "Percentage CMS generation occupancy at which to "                \
           " initiate CMS collection for bootstrapping collection stats")    \
                                                                             \
   product(intx, CMSInitiatingOccupancyFraction, -1,                         \
           "Percentage CMS generation occupancy to start a CMS collection "  \
-          " cycle (A negative value means that CMSTirggerRatio is used)")   \
+          " cycle (A negative value means that CMSTriggerRatio is used)")   \
+                                                                            \
+  product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
+          "Percentage CMS perm generation occupancy to start a CMScollection"\
+          " cycle (A negative value means that CMSTriggerPermRatio is used)")\
+                                                                            \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
           "Only use occupancy as a crierion for starting a CMS collection") \
+                                                                            \
+  product(intx, CMSIsTooFullPercentage, 98,                                 \
+          "An absolute ceiling above which CMS will always consider the"    \
+          " perm gen ripe for collection")                                  \
                                                                             \
   develop(bool, CMSTestInFreeList, false,                                   \
           "Check if the coalesced range is already in the "                 \
           "free lists as claimed.")                                         \
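Taken together with init_initiating_occupancy() in the .cpp change, the new defaults (CMSInitiatingPermOccupancyFraction = -1, CMSTriggerPermRatio = 80) mean the perm gen trigger falls back to the MinHeapFreeRatio formula. A worked check, assuming MinHeapFreeRatio = 40 purely for illustration:

```cpp
// Worked check of the perm-gen trigger defaults introduced in this hunk.
// MinHeapFreeRatio = 40 is an assumed example value, not set by this commit.
#include <cassert>

int main() {
  const long io = -1; // CMSInitiatingPermOccupancyFraction default
  const long tr = 80; // CMSTriggerPermRatio default
  const long f  = 40; // assumed MinHeapFreeRatio

  const double occ = (io >= 0) ? io / 100.0
                               : ((100 - f) + (double)(tr * f) / 100.0) / 100.0;
  assert(occ == 0.92); // (60 + 32) / 100: perm cycles start at ~92% occupancy
  return 0;
}
```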