Commit f12ebb89 authored by johnc

Merge

@@ -46,9 +46,9 @@ CMSAdaptiveSizePolicy::CMSAdaptiveSizePolicy(size_t init_eden_size,
   _processor_count = os::active_processor_count();
-  if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) {
+  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
     assert(_processor_count > 0, "Processor count is suspect");
-    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads,
+    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
                                        (uint) _processor_count);
   } else {
     _concurrent_processor_count = 1;
...
@@ -606,7 +606,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
   }
-  if (!_markStack.allocate(CMSMarkStackSize)) {
+  if (!_markStack.allocate(MarkStackSize)) {
     warning("Failed to allocate CMS Marking Stack");
     return;
   }
@@ -617,13 +617,13 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   // Support for multi-threaded concurrent phases
   if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
+    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
     }
-    if (ParallelCMSThreads > 1) {
+    if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
-                                 ParallelCMSThreads, true);
+                                 ConcGCThreads, true);
       if (_conc_workers == NULL) {
         warning("GC/CMS: _conc_workers allocation failure: "
                 "forcing -CMSConcurrentMTEnabled");
@@ -634,13 +634,13 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
     }
   }
   if (!CMSConcurrentMTEnabled) {
-    ParallelCMSThreads = 0;
+    ConcGCThreads = 0;
   } else {
     // Turn off CMSCleanOnEnter optimization temporarily for
     // the MT case where it's not fixed yet; see 6178663.
     CMSCleanOnEnter = false;
   }
-  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
+  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
          "Inconsistency");
   // Parallel task queues; these are shared for the
@@ -648,7 +648,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   // are not shared with parallel scavenge (ParNew).
   {
     uint i;
-    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
+    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
          || ParallelRefProcEnabled)
@@ -3657,7 +3657,7 @@ bool CMSCollector::markFromRootsWork(bool asynch) {
   assert(_revisitStack.isEmpty(), "tabula rasa");
   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   bool result = false;
-  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
+  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
     result = do_marking_mt(asynch);
   } else {
     result = do_marking_st(asynch);
@@ -4174,10 +4174,10 @@ void CMSConcMarkingTask::coordinator_yield() {
 }

 bool CMSCollector::do_marking_mt(bool asynch) {
-  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
+  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   // In the future this would be determined ergonomically, based
   // on #cpu's, # active mutator threads (and load), and mutation rate.
-  int num_workers = ParallelCMSThreads;
+  int num_workers = ConcGCThreads;
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@@ -6429,8 +6429,8 @@ bool CMSMarkStack::allocate(size_t size) {
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
 void CMSMarkStack::expand() {
-  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
-  if (_capacity == CMSMarkStackSizeMax) {
+  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
+  if (_capacity == MarkStackSizeMax) {
     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
       // We print a warning message only once per CMS cycle.
       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
@@ -6438,7 +6438,7 @@ void CMSMarkStack::expand() {
     return;
   }
   // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
+  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
   // Do not give up existing stack until we have managed to
   // get the double capacity that we desired.
   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
...
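For reference, CMSMarkStack::expand() above grows the stack by doubling but never past MarkStackSizeMax. A minimal sketch of that clamped doubling; the starting capacity and the cap are assumed values, not the platform defaults:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t capacity = 32 * 1024;                         // assumed current capacity
      const size_t mark_stack_size_max = 4 * 1024 * 1024;  // assumed cap
      // Mirrors MIN2(_capacity*2, MarkStackSizeMax): double, but never past the cap.
      size_t new_capacity = std::min(capacity * 2, mark_stack_size_max);
      printf("new capacity: %zu entries\n", new_capacity);
      return 0;
    }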
@@ -44,20 +44,20 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
 {
   // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
-  set_green_zone(G1ConcRefineGreenZone);
+  set_green_zone(G1ConcRefinementGreenZone);

-  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
   }
-  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));

-  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
-  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));

   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
@@ -76,15 +76,15 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
 }

 void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
   } else {
-    _thread_threshold_step = G1ConcRefineThresholdStep;
+    _thread_threshold_step = G1ConcRefinementThresholdStep;
   }
 }

 int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
+  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
 }

 void ConcurrentG1Refine::init() {
...
@@ -39,7 +39,8 @@ class ConcurrentG1Refine: public CHeapObj {
  * running. If the length becomes red (max queue length) the mutators start
  * processing the buffers.
  *
- * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+ * There are some interesting cases (when G1UseAdaptiveConcRefinement
+ * is turned off):
  * 1) green = yellow = red = 0. In this case the mutator will process all
  *    buffers. Except for those that are created by the deferred updates
  *    machinery during a collection.
...
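Taken together, the renamed refinement flags form an ergonomic chain: green defaults to the number of parallel GC threads, yellow to three times green, red to twice yellow, and the per-thread activation step divides the green-to-yellow span among the workers. A hedged sketch of that arithmetic (the thread counts are assumptions):

    #include <algorithm>
    #include <cstdio>

    int main() {
      int parallel_gc_threads = 4;                    // assumed value
      // Defaults from ConcurrentG1Refine's constructor:
      int green  = std::max(parallel_gc_threads, 1);  // G1ConcRefinementGreenZone
      int yellow = green * 3;                         // G1ConcRefinementYellowZone
      int red    = yellow * 2;                        // G1ConcRefinementRedZone
      // Default from reset_threshold_step(), assuming 3 worker threads:
      int workers = 3;
      int step = (yellow - green) / (workers + 1);
      printf("green=%d yellow=%d red=%d step=%d\n", green, yellow, red, step);
      return 0;
    }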
@@ -107,7 +107,7 @@ void ConcurrentG1RefineThread::run_young_rs_sampling() {
     if (_should_terminate) {
       break;
     }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
   }
 }
@@ -127,7 +127,7 @@ bool ConcurrentG1RefineThread::is_active() {
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
                              _worker_id, _threshold, (int)dcqs.completed_buffers_num());
@@ -143,7 +143,7 @@ void ConcurrentG1RefineThread::activate() {
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
                              _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
@@ -218,9 +218,13 @@ void ConcurrentG1RefineThread::run() {
 void ConcurrentG1RefineThread::yield() {
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield");
+  }
   _sts.yield("G1 refine");
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield-end");
+  }
 }

 void ConcurrentG1RefineThread::stop() {
@@ -241,7 +245,9 @@ void ConcurrentG1RefineThread::stop() {
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }

 void ConcurrentG1RefineThread::print() const {
...
@@ -447,7 +447,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);

-  _markStack.allocate(G1MarkStackSize);
+  _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);

   // Create & start a ConcurrentMark thread.
@@ -461,7 +461,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");

   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-  satb_qs.set_buffer_size(G1SATBLogBufferSize);
+  satb_qs.set_buffer_size(G1SATBBufferSize);

   int size = (int) MAX2(ParallelGCThreads, (size_t)1);
   _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
@@ -483,8 +483,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     _accum_task_vtime[i] = 0.0;
   }

-  if (ParallelMarkingThreads > ParallelGCThreads) {
-    vm_exit_during_initialization("Can't have more ParallelMarkingThreads "
+  if (ConcGCThreads > ParallelGCThreads) {
+    vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
   }
   if (ParallelGCThreads == 0) {
@@ -494,11 +494,11 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     _sleep_factor             = 0.0;
     _marking_task_overhead    = 1.0;
   } else {
-    if (ParallelMarkingThreads > 0) {
-      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
+    if (ConcGCThreads > 0) {
+      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
-      _parallel_marking_threads = ParallelMarkingThreads;
+      _parallel_marking_threads = ConcGCThreads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
...
@@ -583,7 +583,7 @@ HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
               res->zero_fill_state() == HeapRegion::Allocated)),
          "Non-young alloc Regions must be zero filled (and non-H)");

-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     if (res != NULL) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
@@ -2477,7 +2477,7 @@ void G1CollectedHeap::print_tracing_info() const {
     if (G1SummarizeRSetStats) {
       g1_rem_set()->print_summary_info();
     }
-    if (G1SummarizeConcurrentMark) {
+    if (G1SummarizeConcMark) {
       concurrent_mark()->print_summary_info();
     }
     if (G1SummarizeZFStats) {
@@ -3480,7 +3480,7 @@ void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
   HeapRegion* r = heap_region_containing(old);

   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintRegions) {
+    if (G1PrintHeapRegions) {
       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
@@ -4002,9 +4002,7 @@ public:
         _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
         _g1h->g1_policy()->record_termination_time(i, term_ms);
       }
-      if (G1UseSurvivorSpaces) {
-        _g1h->g1_policy()->record_thread_age_table(pss.age_table());
-      }
+      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

       // Clean up any par-expanded rem sets.
...
@@ -270,14 +270,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;

-  if (G1UseSurvivorSpaces) {
-    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
-    // fixed, then _max_survivor_regions will be calculated at
-    // calculate_young_list_target_config during initialization
-    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
-  } else {
-    _max_survivor_regions = 0;
-  }
+  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
+  // fixed, then _max_survivor_regions will be calculated at
+  // calculate_young_list_target_config during initialization
+  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

   initialize_all();
 }
@@ -296,28 +292,54 @@ void G1CollectorPolicy::initialize_flags() {
   CollectorPolicy::initialize_flags();
 }

+// The easiest way to deal with the parsing of the NewSize /
+// MaxNewSize / etc. parameters is to re-use the code in the
+// TwoGenerationCollectorPolicy class. This is similar to what
+// ParallelScavenge does with its GenerationSizer class (see
+// ParallelScavengeHeap::initialize()). We might change this in the
+// future, but it's a good start.
+class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
+
+public:
+  G1YoungGenSizer() {
+    initialize_flags();
+    initialize_size_info();
+  }
+
+  size_t min_young_region_num() {
+    return size_to_region_num(_min_gen0_size);
+  }
+  size_t initial_young_region_num() {
+    return size_to_region_num(_initial_gen0_size);
+  }
+  size_t max_young_region_num() {
+    return size_to_region_num(_max_gen0_size);
+  }
+};
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
-  size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;

   assert(Heap_lock->owned_by_self(), "Locking discipline.");

-  if (G1SteadyStateUsed < 50) {
-    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
-  }
-
   initialize_gc_policy_counters();

   if (G1Gen) {
     _in_young_gc_mode = true;

-    if (G1YoungGenSize == 0) {
+    G1YoungGenSizer sizer;
+    size_t initial_region_num = sizer.initial_young_region_num();
+
+    if (UseAdaptiveSizePolicy) {
       set_adaptive_young_list_length(true);
       _young_list_fixed_length = 0;
     } else {
       set_adaptive_young_list_length(false);
-      _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
+      _young_list_fixed_length = initial_region_num;
     }
     _free_regions_at_end_of_collection = _g1->free_regions();
     _scan_only_regions_at_end_of_collection = 0;
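The new G1YoungGenSizer only converts the gen0 byte sizes that TwoGenerationCollectorPolicy computes into region counts. A standalone sketch of its size_to_region_num() arithmetic, assuming a 1 MB region (the real granule is HeapRegion::GrainBytes):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t grain_bytes = 1024 * 1024;  // assumed region size
      size_t byte_size = 20 * 1024 * 1024;     // assumed initial young gen size
      // Mirrors MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes):
      size_t regions = std::max((size_t) 1, byte_size / grain_bytes);
      printf("initial young region num: %zu\n", regions);  // 20
      return 0;
    }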
@@ -455,7 +477,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "pre-condition" );

   double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent);
+  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   size_t reserve_regions =
     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
@@ -1110,10 +1132,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   size_t short_lived_so_length = _young_list_so_prefix_length;
   _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   tag_scan_only(short_lived_so_length);
-
-  if (G1UseSurvivorSpaces) {
-    _survivors_age_table.clear();
-  }
+  _survivors_age_table.clear();

   assert( verify_young_ages(), "region age verification" );
 }
@@ -1432,7 +1451,7 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
       record_concurrent_mark_init_end_pre(0.0);

       size_t min_used_targ =
-        (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
+        (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

       if (cur_used_bytes > min_used_targ) {
         if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
calculate_young_list_target_config(); calculate_young_list_target_config();
// Note that _mmu_tracker->max_gc_time() returns the time in seconds. // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0; double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
// </NEW PREDICTION> // </NEW PREDICTION>
@@ -1932,7 +1951,7 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

-  if (G1AdaptiveConcRefine) {
+  if (G1UseAdaptiveConcRefinement) {
     const int k_gy = 3, k_gr = 6;
     const double inc_k = 1.1, dec_k = 0.9;
@@ -2607,9 +2626,6 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
-  if (!G1UseSurvivorSpaces) {
-    return;
-  }
   if (G1FixedSurvivorSpaceSize == 0) {
     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
   } else {
@@ -2628,13 +2644,6 @@ bool
 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
                                                                word_size) {
   assert(_g1->regions_accounted_for(), "Region leakage!");
-  // Initiate a pause when we reach the steady-state "used" target.
-  size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
-  size_t used_soft =
-   MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
-        used_hard/2);
-  size_t used = _g1->used();
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;

   size_t young_list_length = _g1->young_list_length();
@@ -2867,7 +2876,7 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
 // estimate of the number of live bytes.
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
                            "top "PTR_FORMAT", young %s",
                            hr->hrs_index(), hr->bottom(), hr->end(),
...
@@ -88,13 +88,13 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
   //   the time slice than what's allowed)
   // consolidate the two entries with the minimum gap between them
   // (this might allow less GC time than what's allowed)
-  guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
-            "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+  guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
+            "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
   // In the case where ScavengeALot is true, such overflow is not
   // uncommon; in such cases, we can, without much loss of precision
   // or performance (we are GC'ing most of the time anyway!),
   // simply overwrite the oldest entry in the tracker: this
-  // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+  // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
   _head_index = trim_index(_head_index + 1);
   assert(_head_index == _tail_index, "Because we have a full circular buffer");
   _tail_index = trim_index(_tail_index + 1);
...
@@ -101,7 +101,7 @@ private:
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
-  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // the oldest entry in the event that +G1UseFixedWindowMMUTracker, thus
   // potentially violating MMU specs for some time thereafter.
   G1MMUTrackerQueueElem _array[QueueLength];
...
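The "forget the oldest entry" recovery above is circular indexing: when the window is full (head == tail), advancing both indices by one reuses the oldest slot. A minimal sketch under assumed names; QueueLength and trim_index() only approximate the real G1MMUTrackerQueue helpers:

    #include <cstdio>

    const int QueueLength = 64;                        // assumed window size
    int trim_index(int i) { return i % QueueLength; }  // wrap-around indexing

    int main() {
      int head = QueueLength - 1;
      int tail = QueueLength - 1;   // full buffer: head == tail
      // Overwrite the oldest entry, the G1UseFixedWindowMMUTracker behaviour:
      head = trim_index(head + 1);  // 0
      tail = trim_index(tail + 1);  // 0; the buffer stays full
      printf("head=%d tail=%d\n", head, tail);
      return 0;
    }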
@@ -467,7 +467,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   // and they are causing failures. When we resolve said race
   // conditions, we'll revert back to parallel remembered set
   // updating and scanning. See CRs 6677707 and 6677708.
-  if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
+  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
     updateRS(worker_i);
     scanNewRefsRS(oc, worker_i);
   } else {
@@ -476,7 +476,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
     _g1p->record_update_rs_time(worker_i, 0.0);
     _g1p->record_scan_new_refs_time(worker_i, 0.0);
   }
-  if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
+  if (G1UseParallelRSetScanning || (worker_i == 0)) {
     scanRS(oc, worker_i);
   } else {
     _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
...
@@ -37,9 +37,6 @@
   develop(intx, G1MarkingOverheadPercent, 0, \
           "Overhead of concurrent marking") \
   \
-  product(uintx, G1YoungGenSize, 0, \
-          "Size of the G1 young generation, 0 is the adaptive policy") \
-  \
   develop(bool, G1Gen, true, \
           "If true, it will enable the generational G1") \
   \
@@ -70,7 +67,7 @@
   develop(intx, G1PausesBtwnConcMark, -1, \
           "If positive, fixed number of pauses between conc markings") \
   \
-  diagnostic(bool, G1SummarizeConcurrentMark, false, \
+  diagnostic(bool, G1SummarizeConcMark, false, \
           "Summarize concurrent mark info") \
   \
   diagnostic(bool, G1SummarizeRSetStats, false, \
@@ -85,12 +82,9 @@
   diagnostic(bool, G1SummarizeZFStats, false, \
           "Summarize zero-filling info") \
   \
-  diagnostic(bool, G1TraceConcurrentRefinement, false, \
+  diagnostic(bool, G1TraceConcRefinement, false, \
           "Trace G1 concurrent refinement") \
   \
-  product(intx, G1MarkStackSize, 2 * 1024 * 1024, \
-          "Size of the mark stack for concurrent marking.") \
-  \
   product(intx, G1MarkRegionStackSize, 1024 * 1024, \
           "Size of the region stack for concurrent marking.") \
   \
@@ -100,20 +94,13 @@
   develop(intx, G1ConcZFMaxRegions, 1, \
           "Stop zero-filling when # of zf'd regions reaches") \
   \
-  product(intx, G1SteadyStateUsed, 90, \
-          "If non-0, try to maintain 'used' at this pct (of max)") \
-  \
-  product(intx, G1SteadyStateUsedDelta, 30, \
-          "If G1SteadyStateUsed is non-0, then do pause this number " \
-          "of percentage points earlier if no marking is in progress.") \
-  \
   develop(bool, G1SATBBarrierPrintNullPreVals, false, \
           "If true, count frac of ptr writes with null pre-vals.") \
   \
-  product(intx, G1SATBLogBufferSize, 1*K, \
+  product(intx, G1SATBBufferSize, 1*K, \
           "Number of entries in an SATB log buffer.") \
   \
-  product(intx, G1SATBProcessCompletedThreshold, 20, \
+  develop(intx, G1SATBProcessCompletedThreshold, 20, \
           "Number of completed buffers that triggers log processing.") \
   \
   develop(intx, G1ExtraRegionSurvRate, 33, \
@@ -127,7 +114,7 @@
   develop(bool, G1SATBPrintStubs, false, \
           "If true, print generated stubs for the SATB barrier") \
   \
-  product(intx, G1ExpandByPercentOfAvailable, 20, \
+  experimental(intx, G1ExpandByPercentOfAvailable, 20, \
           "When expanding, % of uncommitted space to claim.") \
   \
   develop(bool, G1RSBarrierRegionFilter, true, \
@@ -165,36 +152,36 @@
   product(intx, G1UpdateBufferSize, 256, \
           "Size of an update buffer") \
   \
-  product(intx, G1ConcRefineYellowZone, 0, \
+  product(intx, G1ConcRefinementYellowZone, 0, \
           "Number of enqueued update buffers that will " \
           "trigger concurrent processing. Will be selected ergonomically " \
           "by default.") \
   \
-  product(intx, G1ConcRefineRedZone, 0, \
+  product(intx, G1ConcRefinementRedZone, 0, \
           "Maximum number of enqueued update buffers before mutator " \
           "threads start processing new ones instead of enqueueing them. " \
           "Will be selected ergonomically by default. Zero will disable " \
           "concurrent processing.") \
   \
-  product(intx, G1ConcRefineGreenZone, 0, \
+  product(intx, G1ConcRefinementGreenZone, 0, \
           "The number of update buffers that are left in the queue by the " \
           "concurrent processing threads. Will be selected ergonomically " \
           "by default.") \
   \
-  product(intx, G1ConcRefineServiceInterval, 300, \
+  product(intx, G1ConcRefinementServiceIntervalMillis, 300, \
           "The last concurrent refinement thread wakes up every " \
           "specified number of milliseconds to do miscellaneous work.") \
   \
-  product(intx, G1ConcRefineThresholdStep, 0, \
+  product(intx, G1ConcRefinementThresholdStep, 0, \
           "Each time the rset update queue increases by this amount " \
           "activate the next refinement thread if available. " \
           "Will be selected ergonomically by default.") \
   \
-  product(intx, G1RSUpdatePauseFractionPercent, 10, \
+  product(intx, G1RSetUpdatingPauseTimePercent, 10, \
           "A target percentage of time that is allowed to be spent " \
           "processing RS update buffers during the collection pause.") \
   \
-  product(bool, G1AdaptiveConcRefine, true, \
+  product(bool, G1UseAdaptiveConcRefinement, true, \
           "Select green, yellow and red zones adaptively to meet " \
           "the pause requirements.") \
   \
@@ -245,15 +232,15 @@
           "the number of regions for which we'll print a surv rate " \
           "summary.") \
   \
-  product(bool, G1UseScanOnlyPrefix, false, \
+  develop(bool, G1UseScanOnlyPrefix, false, \
           "It determines whether the system will calculate an optimum " \
           "scan-only set.") \
   \
-  product(intx, G1MinReservePercent, 10, \
+  product(intx, G1ReservePercent, 10, \
           "It determines the minimum reserve we should have in the heap " \
           "to minimize the probability of promotion failure.") \
   \
-  diagnostic(bool, G1PrintRegions, false, \
+  diagnostic(bool, G1PrintHeapRegions, false, \
           "If set G1 will print information on which regions are being " \
           "allocated and which are reclaimed.") \
   \
@@ -263,9 +250,6 @@
   develop(bool, G1HRRSFlushLogBuffersOnVerify, false, \
           "Forces flushing of log buffers before verification.") \
   \
-  product(bool, G1UseSurvivorSpaces, true, \
-          "When true, use survivor space.") \
-  \
   develop(bool, G1FailOnFPError, false, \
           "When set, G1 will fail when it encounters an FP 'error', " \
           "so as to allow debugging") \
@@ -280,21 +264,21 @@
           "If non-0 is the size of the G1 survivor space, " \
           "otherwise SurvivorRatio is used to determine the size") \
   \
-  product(bool, G1ForgetfulMMUTracker, false, \
+  product(bool, G1UseFixedWindowMMUTracker, false, \
           "If the MMU tracker's memory is full, forget the oldest entry") \
   \
   product(uintx, G1HeapRegionSize, 0, \
           "Size of the G1 regions.") \
   \
-  experimental(bool, G1ParallelRSetUpdatingEnabled, false, \
+  experimental(bool, G1UseParallelRSetUpdating, false, \
           "Enables the parallelization of remembered set updating " \
           "during evacuation pauses") \
   \
-  experimental(bool, G1ParallelRSetScanningEnabled, false, \
+  experimental(bool, G1UseParallelRSetScanning, false, \
           "Enables the parallelization of remembered set scanning " \
           "during evacuation pauses") \
   \
-  product(uintx, G1ParallelRSetThreads, 0, \
+  product(uintx, G1ConcRefinementThreads, 0, \
           "If non-0 is the number of parallel rem set update threads, " \
           "otherwise the value is determined ergonomically.") \
   \
...
@@ -176,6 +176,7 @@ arguments.cpp management.hpp
 arguments.cpp oop.inline.hpp
 arguments.cpp os_<os_family>.inline.hpp
 arguments.cpp referenceProcessor.hpp
+arguments.cpp taskqueue.hpp
 arguments.cpp universe.inline.hpp
 arguments.cpp vm_version_<arch>.hpp
...
@@ -1203,6 +1203,11 @@ void Arguments::set_cms_and_parnew_gc_flags() {
   if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
     CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
   }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
 }
 #endif // KERNEL
@@ -1339,6 +1344,17 @@ void Arguments::set_g1_gc_flags() {
   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
     FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   }
+
+  if (FLAG_IS_DEFAULT(MarkStackSize)) {
+    // Size as a multiple of TaskQueueSuper::N which is larger
+    // for 64-bit.
+    FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
+  }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
 }

 void Arguments::set_heap_size() {
@@ -1737,6 +1753,11 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }

+  if (UseG1GC) {
+    status = status && verify_percentage(InitiatingHeapOccupancyPercent,
+                                         "InitiatingHeapOccupancyPercent");
+  }
+
   status = status && verify_interval(RefDiscoveryPolicy,
                                      ReferenceProcessor::DiscoveryPolicyMin,
                                      ReferenceProcessor::DiscoveryPolicyMax,
@@ -1795,6 +1816,29 @@ static bool match_option(const JavaVMOption* option, const char** names, const c
   return false;
 }

+bool Arguments::parse_uintx(const char* value,
+                            uintx* uintx_arg,
+                            uintx min_size) {
+  // Check the sign first since atomull() parses only unsigned values.
+  bool value_is_positive = !(*value == '-');
+
+  if (value_is_positive) {
+    julong n;
+    bool good_return = atomull(value, &n);
+    if (good_return) {
+      bool above_minimum = n >= min_size;
+      bool value_is_too_large = n > max_uintx;
+
+      if (above_minimum && !value_is_too_large) {
+        *uintx_arg = n;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
                                                   julong* long_arg,
                                                   julong min_size) {
@@ -2453,6 +2497,37 @@ SOLARIS_ONLY(
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:YoungPLABSize in place of "
                   "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
+    } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
+               match_option(option, "-XX:G1MarkStackSize=", &tail)) {
+      julong stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid mark stack size: %s\n", option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
+    } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
+      julong max_stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &max_stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum mark stack size: %s\n",
+                    option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
+    } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
+               match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
+      uintx conc_threads = 0;
+      if (!parse_uintx(tail, &conc_threads, 1)) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid concurrent threads: %s\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
...
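To see what the alias handling above accepts, here is a hedged re-implementation of the parse_uintx() contract in portable C++ (strtoull stands in for HotSpot's atomull, and uint32_t for uintx; both substitutions are assumptions):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    typedef uint32_t uintx_t;  // stand-in for HotSpot's uintx

    // Same contract as Arguments::parse_uintx(): true iff value is an
    // unsigned integer >= min_size; on failure *out is left undefined.
    bool parse_uintx(const char* value, uintx_t* out, uintx_t min_size) {
      if (*value == '-') return false;  // check the sign first, as the real code does
      char* end = NULL;
      unsigned long long n = strtoull(value, &end, 10);
      if (end == value || *end != '\0') return false;    // not a plain number
      if (n < min_size || n > UINT32_MAX) return false;  // range check
      *out = (uintx_t) n;
      return true;
    }

    int main() {
      uintx_t conc_threads = 0;
      // As with -XX:ParallelCMSThreads=4, which now sets ConcGCThreads:
      if (parse_uintx("4", &conc_threads, 1)) {
        printf("ConcGCThreads=%u\n", conc_threads);
      }
      return 0;
    }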
@@ -343,6 +343,12 @@ class Arguments : AllStatic {
   static ArgsRange check_memory_size(julong size, julong min_size);
   static ArgsRange parse_memory_size(const char* s, julong* long_arg,
                                      julong min_size);
+  // Parse a string for an unsigned integer. Returns true if value
+  // is an unsigned integer greater than or equal to the minimum
+  // parameter passed and returns the value in uintx_arg. Returns
+  // false otherwise, with uintx_arg undefined.
+  static bool parse_uintx(const char* value, uintx* uintx_arg,
+                          uintx min_size);

   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
...
@@ -1245,9 +1245,6 @@ class CommandLineFlags {
   product(uintx, ParallelGCThreads, 0, \
           "Number of parallel threads parallel gc will use") \
   \
-  product(uintx, ParallelCMSThreads, 0, \
-          "Max number of threads CMS will use for concurrent work") \
-  \
   develop(bool, ParallelOldGCSplitALot, false, \
           "Provoke splitting (copying data from a young gen space to" \
           "multiple destination spaces)") \
@@ -1258,8 +1255,8 @@ class CommandLineFlags {
   develop(bool, TraceRegionTasksQueuing, false, \
           "Trace the queuing of the region tasks") \
   \
-  product(uintx, ParallelMarkingThreads, 0, \
-          "Number of marking threads concurrent gc will use") \
+  product(uintx, ConcGCThreads, 0, \
+          "Number of threads concurrent gc will use") \
   \
   product(uintx, YoungPLABSize, 4096, \
           "Size of young gen promotion labs (in HeapWords)") \
@@ -1535,11 +1532,11 @@ class CommandLineFlags {
   develop(bool, CMSOverflowEarlyRestoration, false, \
           "Whether preserved marks should be restored early") \
   \
-  product(uintx, CMSMarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
-          "Size of CMS marking stack") \
+  product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
+          "Size of marking stack") \
   \
-  product(uintx, CMSMarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
-          "Max size of CMS marking stack") \
+  product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
+          "Max size of marking stack") \
   \
   notproduct(bool, CMSMarkStackOverflowALot, false, \
           "Whether we should simulate frequent marking stack / work queue" \
@@ -1724,6 +1721,13 @@ class CommandLineFlags {
           "Percentage CMS generation occupancy to start a CMS collection " \
           "cycle. A negative value means that CMSTriggerRatio is used") \
   \
+  product(uintx, InitiatingHeapOccupancyPercent, 45, \
+          "Percentage of the (entire) heap occupancy to start a " \
+          "concurrent GC cycle. It is used by GCs that trigger a " \
+          "concurrent GC cycle based on the occupancy of the entire heap, " \
+          "not just one of the generations (e.g., G1). A value of 0 " \
+          "denotes 'do constant GC cycles'.") \
+  \
   product(intx, CMSInitiatingPermOccupancyFraction, -1, \
           "Percentage CMS perm generation occupancy to start a " \
           "CMS collection cycle. A negative value means that " \
...
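A hedged worked example of the new InitiatingHeapOccupancyPercent trigger as used in record_collection_pause_end() earlier in this change (heap capacity and occupancy are assumed values):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      size_t capacity = 1024 * M;  // assumed 1 GB heap
      unsigned ihop = 45;          // InitiatingHeapOccupancyPercent default
      // Mirrors: min_used_targ = (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
      size_t min_used_targ = (capacity / 100) * ihop;
      size_t cur_used_bytes = 500 * M;  // assumed current occupancy
      if (cur_used_bytes > min_used_targ) {
        printf("start concurrent cycle: used %zuM > target %zuM\n",
               cur_used_bytes / M, min_used_targ / M);
      }
      return 0;
    }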
@@ -133,6 +133,9 @@ public:
   // Maximum number of elements allowed in the queue. This is two less
   // than the actual queue size, for somewhat complicated reasons.
   uint max_elems() { return N - 2; }
+
+  // Total size of queue.
+  static const uint total_size() { return N; }
 };

 template<class E> class GenericTaskQueue: public TaskQueueSuper {
...
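The G1 default for MarkStackSize set earlier in this change (128 * TaskQueueSuper::total_size()) scales with the queue size N, which is larger on 64-bit. A sketch of the computation; the value of N below is an assumption, not what HotSpot actually uses:

    #include <cstdio>

    int main() {
      const unsigned N = 1u << 14;         // assumed task queue capacity
      // Mirrors FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size()):
      unsigned mark_stack_size = 128 * N;  // counted in entries, not bytes
      printf("MarkStackSize default: %uk entries\n", mark_stack_size / 1024);
      return 0;
    }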