Commit f903d5a1 authored by ysr


6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
Summary: The option CMSInitiatingPermOccupancyFraction now controls the perm gen triggering threshold. Although the actual value of the threshold has not yet been changed (so there is no change in policy), we now have the infrastructure in place for dynamically deciding when to collect the perm gen, an issue that will be addressed in the near future.
Reviewed-by: jmasa
Parent b2c5d55f
@@ -225,6 +225,34 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   assert(_dilatation_factor >= 1.0, "from previous assert");
 }
+
+// The field "_initiating_occupancy" represents the occupancy percentage
+// at which we trigger a new collection cycle.  Unless explicitly specified
+// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
+// is calculated by:
+//
+//   Let "f" be MinHeapFreeRatio in
+//
+//    _intiating_occupancy = 100-f +
+//                           f * (CMSTrigger[Perm]Ratio/100)
+//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
+//
+// That is, if we assume the heap is at its desired maximum occupancy at the
+// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
+// space be allocated before initiating a new collection cycle.
+//
+void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
+  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
+  if (io >= 0) {
+    _initiating_occupancy = (double)io / 100.0;
+  } else {
+    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
+                             (double)(tr * MinHeapFreeRatio) / 100.0)
+                            / 100.0;
+  }
+}
+
 void ConcurrentMarkSweepGeneration::ref_processor_init() {
   assert(collector() != NULL, "no collector");
   collector()->ref_processor_init();
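As a worked example of the formula above (assuming the common HotSpot defaults MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, with CMSInitiatingOccupancyFraction left at its default of -1, visible in the globals.hpp hunk further down):

    _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
                          = (60 + 32) / 100
                          = 0.92

That is, with defaults, a concurrent cycle is initiated once the generation is about 92% occupied; an explicit non-negative CMSInitiating[Perm]OccupancyFraction short-circuits this computation entirely.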
@@ -520,8 +548,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
   _collector_policy(cp),
-  _unload_classes(false),
-  _unloaded_classes_last_cycle(false),
+  _should_unload_classes(false),
+  _concurrent_cycles_since_last_unload(0),
   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 {
   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
@@ -642,26 +670,11 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
     }
   }

-  // "initiatingOccupancy" is the occupancy ratio at which we trigger
-  // a new collection cycle. Unless explicitly specified via
-  // CMSTriggerRatio, it is calculated by:
-  //   Let "f" be MinHeapFreeRatio in
-  //
-  //    intiatingOccupancy = 100-f +
-  //                         f * (CMSTriggerRatio/100)
-  // That is, if we assume the heap is at its desired maximum occupancy at the
-  // end of a collection, we let CMSTriggerRatio of the (purported) free
-  // space be allocated before initiating a new collection cycle.
-  if (CMSInitiatingOccupancyFraction > 0) {
-    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
-  } else {
-    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
-                            (double)(CMSTriggerRatio *
-                            MinHeapFreeRatio) / 100.0)
-                           / 100.0;
-  }
+  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
+  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
+  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                          /(double)100;

   _full_gcs_since_conc_gc = 0;
@@ -1413,7 +1426,8 @@ bool CMSCollector::shouldConcurrentCollect() {
     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
-    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
+    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
   }
   // ------------------------------------------------------------------
@@ -1446,22 +1460,36 @@ bool CMSCollector::shouldConcurrentCollect() {
   // old gen want a collection cycle started. Each may use
   // an appropriate criterion for making this decision.
   // XXX We need to make sure that the gen expansion
-  // criterion dovetails well with this.
-  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
+  // criterion dovetails well with this. XXX NEED TO FIX THIS
+  if (_cmsGen->should_concurrent_collect()) {
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print_cr("CMS old gen initiated");
     }
     return true;
   }

-  if (cms_should_unload_classes() &&
-      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMS perm gen initiated");
-    }
-    return true;
-  }
+  // We start a collection if we believe an incremental collection may fail;
+  // this is not likely to be productive in practice because it's probably too
+  // late anyway.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->collector_policy()->is_two_generation_policy(),
+         "You may want to check the correctness of the following");
+  if (gch->incremental_collection_will_fail()) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
+    }
+    return true;
+  }
+
+  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
+    bool res = update_should_unload_classes();
+    if (res) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print_cr("CMS perm gen initiated");
+      }
+      return true;
+    }
+  }

   return false;
 }
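Condensed, the restructured collector-level predicate tries three triggers in order: the old gen, an anticipated incremental-collection failure (a check this commit moves up from the generation into the collector), and finally the perm gen, now gated on CMSClassUnloadingEnabled. The following is a logical reduction for illustration only (a hypothetical free function; the real method is a member of CMSCollector, queries these states directly, and is preceded by stats- and interval-based checks elided above):

    // Simplified paraphrase of CMSCollector::shouldConcurrentCollect() above.
    bool should_start_concurrent_cycle(bool old_gen_wants_collection,       // _cmsGen->should_concurrent_collect()
                                       bool incremental_collection_will_fail,
                                       bool class_unloading_enabled,        // CMSClassUnloadingEnabled
                                       bool perm_gen_wants_collection,      // _permGen->should_concurrent_collect()
                                       bool should_unload_classes) {        // update_should_unload_classes()
      if (old_gen_wants_collection)         return true;  // old gen crossed its trigger
      if (incremental_collection_will_fail) return true;  // a scavenge is expected to fail
      if (class_unloading_enabled && perm_gen_wants_collection && should_unload_classes)
        return true;                                      // perm gen trigger, gated on unloading
      return false;
    }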
@@ -1471,32 +1499,36 @@ void CMSCollector::clear_expansion_cause() {
   _permGen->clear_expansion_cause();
 }

-bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
-    double initiatingOccupancy) {
 // We should be conservative in starting a collection cycle. To
 // start too eagerly runs the risk of collecting too often in the
 // extreme. To collect too rarely falls back on full collections,
 // which works, even if not optimum in terms of concurrent work.
 // As a work around for too eagerly collecting, use the flag
 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
 // giving the user an easily understandable way of controlling the
 // collections.
 // We want to start a new collection cycle if any of the following
 // conditions hold:
-// . our current occupancy exceeds the initiating occupancy, or
-// . we recently needed to expand and have not since that expansion,
-//   collected, or
-// . we are not using adaptive free lists and linear allocation is
-//   going to fail, or
-// . (for old gen) incremental collection has already failed or
-//   may soon fail in the near future as we may not be able to absorb
-//   promotions.
+// . our current occupancy exceeds the configured initiating occupancy
+//   for this generation, or
+// . we recently needed to expand this space and have not, since that
+//   expansion, done a collection of this generation, or
+// . the underlying space believes that it may be a good idea to initiate
+//   a concurrent collection (this may be based on criteria such as the
+//   following: the space uses linear allocation and linear allocation is
+//   going to fail, or there is believed to be excessive fragmentation in
+//   the generation, etc... or ...
+// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
+//   the case of the old generation, not the perm generation; see CR 6543076):
+//   we may be approaching a point at which allocation requests may fail because
+//   we will be out of sufficient free space given allocation rate estimates.]
+bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
   assert_lock_strong(freelistLock());
-  if (occupancy() > initiatingOccupancy) {
+  if (occupancy() > initiating_occupancy()) {
     if (PrintGCDetails && Verbose) {
       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
-        short_name(), occupancy(), initiatingOccupancy);
+        short_name(), occupancy(), initiating_occupancy());
     }
     return true;
   }
@@ -1510,20 +1542,9 @@ bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
     }
     return true;
   }
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
-        short_name());
-    }
-    return true;
-  }
-  if (!_cmsSpace->adaptive_freelists() &&
-      _cmsSpace->linearAllocationWouldFail()) {
+  if (_cmsSpace->should_concurrent_collect()) {
     if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because of linAB ",
+      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
         short_name());
     }
     return true;
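The old linear-allocation check has not disappeared; it has been pushed down into the space behind the new _cmsSpace->should_concurrent_collect() call. A minimal sketch of what that space-level trigger might encapsulate, reduced to a standalone predicate and based purely on the check removed above (the actual CompactibleFreeListSpace implementation is not part of this diff and may add fragmentation heuristics, per the comment preceding the function):

    // Hypothetical reduction of the space-level trigger; parameter names are
    // illustrative stand-ins for adaptive_freelists() and
    // linearAllocationWouldFail() on the space.
    bool space_should_concurrent_collect(bool adaptive_freelists,
                                         bool linear_allocation_would_fail) {
      // The space uses non-adaptive (linear) allocation and it is about to fail.
      return !adaptive_freelists && linear_allocation_would_fail;
    }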
@@ -1970,8 +1991,9 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
          "Should have been NULL'd before baton was passed");
   reset(false /* == !asynch */);
   _cmsGen->reset_after_compaction();
+  _concurrent_cycles_since_last_unload = 0;

-  if (verifying() && !cms_should_unload_classes()) {
+  if (verifying() && !should_unload_classes()) {
     perm_gen_verify_bit_map()->clear_all();
   }
@@ -2098,6 +2120,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
   {
     bool safepoint_check = Mutex::_no_safepoint_check_flag;
     MutexLockerEx hl(Heap_lock, safepoint_check);
+    FreelistLocker fll(this);
     MutexLockerEx x(CGC_lock, safepoint_check);
     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
       // The foreground collector is active or we're
@@ -2112,13 +2135,9 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
       // a new cycle.
       clear_expansion_cause();
     }
-    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
-    // This controls class unloading in response to an explicit gc request.
-    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
-    // we will unload classes even if CMSClassUnloadingEnabled is not set.
-    // See CR 6541037 and related CRs.
-    _unload_classes = _full_gc_requested // ... for this cycle
-                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
+    // Decide if we want to enable class unloading as part of the
+    // ensuing concurrent GC cycle.
+    update_should_unload_classes();
     _full_gc_requested = false; // acks all outstanding full gc requests
     // Signal that we are about to start a collection
     gch->increment_total_full_collections(); // ... starting a collection cycle
@@ -3047,21 +3066,62 @@ void CMSCollector::verify_overflow_empty() const {
 }
 #endif // PRODUCT

+// Decide if we want to enable class unloading as part of the
+// ensuing concurrent GC cycle. We will collect the perm gen and
+// unload classes if it's the case that:
+// (1) an explicit gc request has been made and the flag
+//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
+// (2) (a) class unloading is enabled at the command line, and
+//     (b) (i)   perm gen threshold has been crossed, or
+//         (ii)  old gen is getting really full, or
+//         (iii) the previous N CMS collections did not collect the
+//               perm gen
+// NOTE: Provided there is no change in the state of the heap between
+// calls to this method, it should have idempotent results. Moreover,
+// its results should be monotonically increasing (i.e. going from 0 to 1,
+// but not 1 to 0) between successive calls between which the heap was
+// not collected. For the implementation below, it must thus rely on
+// the property that concurrent_cycles_since_last_unload()
+// will not decrease unless a collection cycle happened and that
+// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
+// themselves also monotonic in that sense. See check_monotonicity()
+// below.
+bool CMSCollector::update_should_unload_classes() {
+  _should_unload_classes = false;
+  // Condition 1 above
+  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
+    _should_unload_classes = true;
+  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
+    // Disjuncts 2.b.(i,ii,iii) above
+    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
+                              CMSClassUnloadingMaxInterval)
+                             || _permGen->should_concurrent_collect()
+                             || _cmsGen->is_too_full();
+  }
+  return _should_unload_classes;
+}
+
+bool ConcurrentMarkSweepGeneration::is_too_full() const {
+  bool res = should_concurrent_collect();
+  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
+  return res;
+}
+
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
   const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
                   | SharedHeap::SO_CodeCache;

-  if (cms_should_unload_classes()) { // Should unload classes this cycle
+  if (should_unload_classes()) {     // Should unload classes this cycle
     remove_root_scanning_option(rso); // Shrink the root set appropriately
     set_verifying(should_verify);    // Set verification state for this cycle
     return;                          // Nothing else needs to be done at this time
   }

   // Not unloading classes this cycle
-  assert(!cms_should_unload_classes(), "Inconsitency!");
-  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
+  assert(!should_unload_classes(), "Inconsitency!");
+  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
     // We were not verifying, or we _were_ unloading classes in the last cycle,
     // AND some verification options are enabled this cycle; in this case,
     // we must make sure that the deadness map is allocated if not already so,
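One consequence of update_should_unload_classes() worth spelling out: with the new flag's shipped default of CMSClassUnloadingMaxInterval = 0 (see the globals.hpp hunk below), disjunct 2.b.(iii) is vacuously true whenever CMSClassUnloadingEnabled is set, because the cycle counter is unsigned; classes are then unloaded on every concurrent cycle, preserving the old behavior. Reduced to a standalone predicate (a hypothetical helper, for illustration only):

    // Disjunct 2.b.(iii) in isolation: has the no-unload interval elapsed?
    // With the default max_interval == 0 this comparison is always true for an
    // unsigned counter, so unloading occurs every cycle once
    // CMSClassUnloadingEnabled is set.
    bool unload_interval_elapsed(unsigned int cycles_since_last_unload,
                                 unsigned int max_interval) {
      return cycles_since_last_unload >= max_interval;
    }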
@@ -4693,7 +4753,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   GenCollectedHeap* gch = GenCollectedHeap::heap();

-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     CodeCache::gc_prologue();
   }
   assert(haveFreelistLocks(), "must have free list locks");
@@ -4753,7 +4813,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   verify_work_stacks_empty();
   verify_overflow_empty();

-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     CodeCache::gc_epilogue();
   }
@@ -5623,7 +5683,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
     verify_work_stacks_empty();
   }

-  if (cms_should_unload_classes()) {
+  if (should_unload_classes()) {
     {
       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
@@ -5726,7 +5786,7 @@ void CMSCollector::sweep(bool asynch) {
   // this cycle, we preserve the perm gen object "deadness" information
   // in the perm_gen_verify_bit_map. In order to do that we traverse
   // all blocks in perm gen and mark all dead objects.
-  if (verifying() && !cms_should_unload_classes()) {
+  if (verifying() && !should_unload_classes()) {
     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
            "Should have already been allocated");
     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
@@ -5753,7 +5813,7 @@ void CMSCollector::sweep(bool asynch) {
     }

     // Now repeat for perm gen
-    if (cms_should_unload_classes()) {
+    if (should_unload_classes()) {
       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                                bitMapLock());
       sweepWork(_permGen, asynch);
@@ -5775,7 +5835,7 @@ void CMSCollector::sweep(bool asynch) {
     // already have needed locks
     sweepWork(_cmsGen, asynch);

-    if (cms_should_unload_classes()) {
+    if (should_unload_classes()) {
       sweepWork(_permGen, asynch);
     }
     // Update heap occupancy information which is used as
@@ -5937,6 +5997,11 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
   }
   gen->cmsSpace()->sweep_completed();
   gen->cmsSpace()->endSweepFLCensus(sweepCount());
+  if (should_unload_classes()) {              // unloaded classes this cycle,
+    _concurrent_cycles_since_last_unload = 0; // ... reset count
+  } else {                                    // did not unload classes,
+    _concurrent_cycles_since_last_unload++;   // ... increment count
+  }
 }

 // Reset CMS data structures (for now just the marking bit map)
@@ -7194,7 +7259,7 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
   _revisitStack(revisitStack),
   _finger(finger),
   _parent(parent),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 { }

 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@@ -7217,7 +7282,7 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   _finger(finger),
   _global_finger_addr(global_finger_addr),
   _parent(parent),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 { }
@@ -7360,7 +7425,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
   _mark_stack(mark_stack),
   _revisit_stack(revisit_stack),
   _concurrent_precleaning(concurrent_precleaning),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7422,7 +7487,7 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
   _bit_map(bit_map),
   _work_queue(work_queue),
   _revisit_stack(revisit_stack),
-  _should_remember_klasses(collector->cms_should_unload_classes())
+  _should_remember_klasses(collector->should_unload_classes())
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7944,7 +8009,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
 #ifdef DEBUG
     if (oop(addr)->klass() != NULL &&
-        ( !_collector->cms_should_unload_classes()
+        ( !_collector->should_unload_classes()
           || oop(addr)->is_parsable())) {
       // Ignore mark word because we are running concurrent with mutators
       assert(oop(addr)->is_oop(true), "live block should be an oop");
@@ -7957,7 +8022,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
   } else {
     // This should be an initialized object that's alive.
     assert(oop(addr)->klass() != NULL &&
-           (!_collector->cms_should_unload_classes()
+           (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
     // Ignore mark word because we are running concurrent with mutators
......
@@ -535,13 +535,16 @@ class CMSCollector: public CHeapObj {
   // In support of ExplicitGCInvokesConcurrent
   static bool _full_gc_requested;
   unsigned int _collection_count_start;

   // Should we unload classes this concurrent cycle?
-  // Set in response to a concurrent full gc request.
-  bool _unload_classes;
-  bool _unloaded_classes_last_cycle;
+  bool _should_unload_classes;
+  unsigned int _concurrent_cycles_since_last_unload;
+  unsigned int concurrent_cycles_since_last_unload() const {
+    return _concurrent_cycles_since_last_unload;
+  }
   // Did we (allow) unload classes in the previous concurrent cycle?
-  bool cms_unloaded_classes_last_cycle() const {
-    return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
+  bool unloaded_classes_last_cycle() const {
+    return concurrent_cycles_since_last_unload() == 0;
   }

   // Verification support
@@ -651,8 +654,6 @@ class CMSCollector: public CHeapObj {
   // number of full gc's since the last concurrent gc.
   uint _full_gcs_since_conc_gc;

-  // if occupancy exceeds this, start a new gc cycle
-  double _initiatingOccupancy;
   // occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
@@ -825,7 +826,6 @@ class CMSCollector: public CHeapObj {
   Mutex* bitMapLock() const { return _markBitMap.lock(); }
   static CollectorState abstract_state() { return _collectorState; }
-  double initiatingOccupancy() const { return _initiatingOccupancy; }
   bool should_abort_preclean() const; // Whether preclean should be aborted.
   size_t get_eden_used() const;
@@ -849,11 +849,10 @@ class CMSCollector: public CHeapObj {
   // In support of ExplicitGCInvokesConcurrent
   static void request_full_gc(unsigned int full_gc_count);
   // Should we unload classes in a particular concurrent cycle?
-  bool cms_should_unload_classes() const {
-    assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses,
-           "Inconsistency; see CR 6541037");
-    return _unload_classes || CMSClassUnloadingEnabled;
+  bool should_unload_classes() const {
+    return _should_unload_classes;
   }
+  bool update_should_unload_classes();

   void direct_allocated(HeapWord* start, size_t size);
@@ -1022,6 +1021,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
     _incremental_collection_failed = false;
   }

+  // accessors
+  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
+  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
+
  private:
   // For parallel young-gen GC support.
   CMSParGCThreadState** _par_gc_thread_states;
@@ -1029,10 +1032,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Reason generation was expanded
   CMSExpansionCause::Cause _expansion_cause;

-  // accessors
-  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
-  CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
-
   // In support of MinChunkSize being larger than min object size
   const double _dilatation_factor;
@@ -1045,6 +1044,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   CollectionTypes _debug_collection_type;

+  // Fraction of current occupancy at which to start a CMS collection which
+  // will collect this generation (at least).
+  double _initiating_occupancy;
+
  protected:
   // Grow generation by specified size (returns false if unable to grow)
   bool grow_by(size_t bytes);
@@ -1060,6 +1063,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // space.
   size_t max_available() const;

+  // getter and initializer for _initiating_occupancy field.
+  double initiating_occupancy() const { return _initiating_occupancy; }
+  void init_initiating_occupancy(intx io, intx tr);
+
  public:
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                 int level, CardTableRS* ct,
@@ -1103,7 +1110,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   size_t capacity() const;
   size_t used() const;
   size_t free() const;
-  double occupancy() { return ((double)used())/((double)capacity()); }
+  double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
@@ -1158,8 +1165,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
                          bool younger_handles_promotion_failure) const;

   bool should_collect(bool full, size_t size, bool tlab);
-  // XXXPERM
-  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
+  virtual bool should_concurrent_collect() const;
+  virtual bool is_too_full() const;
   void collect(bool full,
                bool clear_all_soft_refs,
                size_t size,
......
@@ -267,7 +267,7 @@ inline bool CMSCollector::is_dead_obj(oop obj) const {
          (_permGen->cmsSpace()->is_in_reserved(addr)
           && _permGen->cmsSpace()->block_is_obj(addr)),
          "must be object");
-  return cms_should_unload_classes() &&
+  return should_unload_classes() &&
          _collectorState == Sweeping &&
          !_markBitMap.isMarked(addr);
 }
......
@@ -1319,6 +1319,10 @@ class CommandLineFlags {
   product(bool, CMSClassUnloadingEnabled, false,                            \
           "Whether class unloading enabled when using CMS GC")              \
                                                                             \
+  product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
+          "When CMS class unloading is enabled, the maximum CMS cycle count"\
+          " for which classes may not be unloaded")                         \
+                                                                            \
   product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
           "Compact when asked to collect CMS gen with clear_all_soft_refs") \
                                                                             \
@@ -1504,17 +1508,30 @@ class CommandLineFlags {
           "Percentage of MinHeapFreeRatio in CMS generation that is "       \
           " allocated before a CMS collection cycle commences")             \
                                                                             \
-  product(intx, CMSBootstrapOccupancy, 50,                                  \
+  product(intx, CMSTriggerPermRatio, 80,                                    \
+          "Percentage of MinHeapFreeRatio in the CMS perm generation that"  \
+          " is allocated before a CMS collection cycle commences, that "    \
+          " also collects the perm generation")                             \
+                                                                            \
+  product(uintx, CMSBootstrapOccupancy, 50,                                 \
           "Percentage CMS generation occupancy at which to "                \
           " initiate CMS collection for bootstrapping collection stats")    \
                                                                             \
   product(intx, CMSInitiatingOccupancyFraction, -1,                         \
           "Percentage CMS generation occupancy to start a CMS collection "  \
-          " cycle (A negative value means that CMSTirggerRatio is used)")   \
+          " cycle (A negative value means that CMSTriggerRatio is used)")   \
+                                                                            \
+  product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
+          "Percentage CMS perm generation occupancy to start a CMScollection"\
+          " cycle (A negative value means that CMSTriggerPermRatio is used)")\
                                                                             \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
           "Only use occupancy as a crierion for starting a CMS collection") \
                                                                             \
+  product(intx, CMSIsTooFullPercentage, 98,                                 \
+          "An absolute ceiling above which CMS will always consider the"    \
+          " perm gen ripe for collection")                                  \
+                                                                            \
   develop(bool, CMSTestInFreeList, false,                                   \
           "Check if the coalesced range is already in the "                 \
           "free lists as claimed.")                                         \
......
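To illustrate how the new knobs compose (a hypothetical invocation; "MyApp" is a placeholder for an application main class), one might run:

    java -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled \
         -XX:CMSInitiatingPermOccupancyFraction=80 \
         -XX:CMSClassUnloadingMaxInterval=4 \
         MyApp

With these settings, a concurrent cycle that also collects the perm gen starts once perm occupancy exceeds 80% (independently of CMSInitiatingOccupancyFraction for the old gen), and classes are unloaded once four or more concurrent cycles have elapsed without unloading, even if neither occupancy trigger fires.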