提交 c4973f85 编写于 作者: J johnc

7120038: G1: ParallelGCThreads==0 is broken

Summary: Running G1 with ParallelGCThreads==0 results in various crashes and asserts. Most of these are caused by unguarded references to the worker threads array or an incorrect number of active workers.
Reviewed-by: jmasa, tonyp
上级 cac942d6
...@@ -1117,12 +1117,9 @@ public: ...@@ -1117,12 +1117,9 @@ public:
// Calculates the number of active workers for a concurrent // Calculates the number of active workers for a concurrent
// phase. // phase.
int ConcurrentMark::calc_parallel_marking_threads() { size_t ConcurrentMark::calc_parallel_marking_threads() {
if (G1CollectedHeap::use_parallel_gc_threads()) {
size_t n_conc_workers; size_t n_conc_workers = 0;
if (!G1CollectedHeap::use_parallel_gc_threads()) {
n_conc_workers = 1;
} else {
if (!UseDynamicNumberOfGCThreads || if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) && (!FLAG_IS_DEFAULT(ConcGCThreads) &&
!ForceDynamicNumberOfGCThreads)) { !ForceDynamicNumberOfGCThreads)) {
...@@ -1137,9 +1134,13 @@ int ConcurrentMark::calc_parallel_marking_threads() { ...@@ -1137,9 +1134,13 @@ int ConcurrentMark::calc_parallel_marking_threads() {
// Don't scale down "n_conc_workers" by scale_parallel_threads() because // Don't scale down "n_conc_workers" by scale_parallel_threads() because
// that scaling has already gone into "_max_parallel_marking_threads". // that scaling has already gone into "_max_parallel_marking_threads".
} }
assert(n_conc_workers > 0, "Always need at least 1");
return n_conc_workers;
} }
assert(n_conc_workers > 0, "Always need at least 1"); // If we are not running with any parallel GC threads we will not
return (int) MAX2(n_conc_workers, (size_t) 1); // have spawned any marking threads either. Hence the number of
// concurrent workers should be 0.
return 0;
} }
void ConcurrentMark::markFromRoots() { void ConcurrentMark::markFromRoots() {
...@@ -1151,24 +1152,24 @@ void ConcurrentMark::markFromRoots() { ...@@ -1151,24 +1152,24 @@ void ConcurrentMark::markFromRoots() {
// stop-the-world GC happens even as we mark in this generation. // stop-the-world GC happens even as we mark in this generation.
_restart_for_overflow = false; _restart_for_overflow = false;
// Parallel task terminator is set in "set_phase()".
force_overflow_conc()->init(); force_overflow_conc()->init();
// _g1h has _n_par_threads // _g1h has _n_par_threads
_parallel_marking_threads = calc_parallel_marking_threads(); _parallel_marking_threads = calc_parallel_marking_threads();
assert(parallel_marking_threads() <= max_parallel_marking_threads(), assert(parallel_marking_threads() <= max_parallel_marking_threads(),
"Maximum number of marking threads exceeded"); "Maximum number of marking threads exceeded");
_parallel_workers->set_active_workers((int)_parallel_marking_threads);
// Don't set _n_par_threads because it affects MT in process_strong_roots()
// and the decisions on that MT processing is made elsewhere.
assert( _parallel_workers->active_workers() > 0, "Should have been set"); size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
set_phase(_parallel_workers->active_workers(), true /* concurrent */);
// Parallel task terminator is set in "set_phase()"
set_phase(active_workers, true /* concurrent */);
CMConcurrentMarkingTask markingTask(this, cmThread()); CMConcurrentMarkingTask markingTask(this, cmThread());
if (parallel_marking_threads() > 0) { if (parallel_marking_threads() > 0) {
_parallel_workers->set_active_workers((int)active_workers);
// Don't set _n_par_threads because it affects MT in proceess_strong_roots()
// and the decisions on that MT processing is made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask); _parallel_workers->run_task(&markingTask);
} else { } else {
markingTask.work(0); markingTask.work(0);
...@@ -1765,8 +1766,7 @@ void ConcurrentMark::cleanup() { ...@@ -1765,8 +1766,7 @@ void ConcurrentMark::cleanup() {
HeapRegionRemSet::reset_for_cleanup_tasks(); HeapRegionRemSet::reset_for_cleanup_tasks();
g1h->set_par_threads(); size_t n_workers;
size_t n_workers = g1h->n_par_threads();
// Do counting once more with the world stopped for good measure. // Do counting once more with the world stopped for good measure.
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
...@@ -1776,8 +1776,10 @@ void ConcurrentMark::cleanup() { ...@@ -1776,8 +1776,10 @@ void ConcurrentMark::cleanup() {
HeapRegion::InitialClaimValue), HeapRegion::InitialClaimValue),
"sanity check"); "sanity check");
g1h->set_par_threads();
n_workers = g1h->n_par_threads();
assert(g1h->n_par_threads() == (int) n_workers, assert(g1h->n_par_threads() == (int) n_workers,
"Should not have been reset"); "Should not have been reset");
g1h->workers()->run_task(&g1_par_count_task); g1h->workers()->run_task(&g1_par_count_task);
// Done with the parallel phase so reset to 0. // Done with the parallel phase so reset to 0.
g1h->set_par_threads(0); g1h->set_par_threads(0);
...@@ -1786,6 +1788,7 @@ void ConcurrentMark::cleanup() { ...@@ -1786,6 +1788,7 @@ void ConcurrentMark::cleanup() {
HeapRegion::FinalCountClaimValue), HeapRegion::FinalCountClaimValue),
"sanity check"); "sanity check");
} else { } else {
n_workers = 1;
g1_par_count_task.work(0); g1_par_count_task.work(0);
} }
...@@ -1851,7 +1854,6 @@ void ConcurrentMark::cleanup() { ...@@ -1851,7 +1854,6 @@ void ConcurrentMark::cleanup() {
(note_end_end - note_end_start)*1000.0); (note_end_end - note_end_start)*1000.0);
} }
// call below, since it affects the metric by which we sort the heap // call below, since it affects the metric by which we sort the heap
// regions. // regions.
if (G1ScrubRemSets) { if (G1ScrubRemSets) {
...@@ -2329,9 +2331,9 @@ public: ...@@ -2329,9 +2331,9 @@ public:
} }
} }
CMRemarkTask(ConcurrentMark* cm) : CMRemarkTask(ConcurrentMark* cm, int active_workers) :
AbstractGangTask("Par Remark"), _cm(cm) { AbstractGangTask("Par Remark"), _cm(cm) {
_cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers()); _cm->terminator()->reset_for_reuse(active_workers);
} }
}; };
...@@ -2357,7 +2359,7 @@ void ConcurrentMark::checkpointRootsFinalWork() { ...@@ -2357,7 +2359,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
// constructor and pass values of the active workers // constructor and pass values of the active workers
// through the gang in the task. // through the gang in the task.
CMRemarkTask remarkTask(this); CMRemarkTask remarkTask(this, active_workers);
g1h->set_par_threads(active_workers); g1h->set_par_threads(active_workers);
g1h->workers()->run_task(&remarkTask); g1h->workers()->run_task(&remarkTask);
g1h->set_par_threads(0); g1h->set_par_threads(0);
...@@ -2367,7 +2369,7 @@ void ConcurrentMark::checkpointRootsFinalWork() { ...@@ -2367,7 +2369,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
int active_workers = 1; int active_workers = 1;
set_phase(active_workers, false /* concurrent */); set_phase(active_workers, false /* concurrent */);
CMRemarkTask remarkTask(this); CMRemarkTask remarkTask(this, active_workers);
// We will start all available threads, even if we decide that the // We will start all available threads, even if we decide that the
// active_workers will be fewer. The extra ones will just bail out // active_workers will be fewer. The extra ones will just bail out
// immediately. // immediately.
...@@ -3123,13 +3125,12 @@ void ConcurrentMark::complete_marking_in_collection_set() { ...@@ -3123,13 +3125,12 @@ void ConcurrentMark::complete_marking_in_collection_set() {
} }
double start = os::elapsedTime(); double start = os::elapsedTime();
int n_workers = g1h->workers()->total_workers();
G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this); G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
if (G1CollectedHeap::use_parallel_gc_threads()) { if (G1CollectedHeap::use_parallel_gc_threads()) {
int n_workers = g1h->workers()->active_workers();
g1h->set_par_threads(n_workers); g1h->set_par_threads(n_workers);
g1h->workers()->run_task(&complete_mark_task); g1h->workers()->run_task(&complete_mark_task);
g1h->set_par_threads(0); g1h->set_par_threads(0);
......
...@@ -718,7 +718,7 @@ public: ...@@ -718,7 +718,7 @@ public:
size_t scale_parallel_threads(size_t n_par_threads); size_t scale_parallel_threads(size_t n_par_threads);
// Calculates the number of GC threads to be used in a concurrent phase. // Calculates the number of GC threads to be used in a concurrent phase.
int calc_parallel_marking_threads(); size_t calc_parallel_marking_threads();
// The following three are interaction between CM and // The following three are interaction between CM and
// G1CollectedHeap // G1CollectedHeap
......
...@@ -3787,8 +3787,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3787,8 +3787,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms); g1_policy()->record_pause_time_ms(pause_time_ms);
int active_gc_threads = workers()->active_workers(); int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
g1_policy()->record_collection_pause_end(active_gc_threads); workers()->active_workers() : 1);
g1_policy()->record_collection_pause_end(active_workers);
MemoryService::track_memory_usage(); MemoryService::track_memory_usage();
...@@ -5312,8 +5313,10 @@ void G1CollectedHeap::process_discovered_references() { ...@@ -5312,8 +5313,10 @@ void G1CollectedHeap::process_discovered_references() {
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1); workers()->active_workers() : 1);
assert(active_workers == workers()->active_workers(), assert(!G1CollectedHeap::use_parallel_gc_threads() ||
"Need to reset active_workers"); active_workers == workers()->active_workers(),
"Need to reset active_workers");
set_par_threads(active_workers); set_par_threads(active_workers);
G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues); G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
...@@ -5451,13 +5454,13 @@ void G1CollectedHeap::evacuate_collection_set() { ...@@ -5451,13 +5454,13 @@ void G1CollectedHeap::evacuate_collection_set() {
assert(UseDynamicNumberOfGCThreads || assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(), n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers"); "If not dynamic should be using all the workers");
workers()->set_active_workers(n_workers);
set_par_threads(n_workers); set_par_threads(n_workers);
} else { } else {
assert(n_par_threads() == 0, assert(n_par_threads() == 0,
"Should be the original non-parallel value"); "Should be the original non-parallel value");
n_workers = 1; n_workers = 1;
} }
workers()->set_active_workers(n_workers);
G1ParTask g1_par_task(this, _task_queues); G1ParTask g1_par_task(this, _task_queues);
...@@ -5479,6 +5482,7 @@ void G1CollectedHeap::evacuate_collection_set() { ...@@ -5479,6 +5482,7 @@ void G1CollectedHeap::evacuate_collection_set() {
workers()->run_task(&g1_par_task); workers()->run_task(&g1_par_task);
} else { } else {
StrongRootsScope srs(this); StrongRootsScope srs(this);
g1_par_task.set_for_termination(n_workers);
g1_par_task.work(0); g1_par_task.work(0);
} }
...@@ -5727,8 +5731,8 @@ void G1CollectedHeap::cleanUpCardTable() { ...@@ -5727,8 +5731,8 @@ void G1CollectedHeap::cleanUpCardTable() {
// Iterate over the dirty cards region list. // Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this); G1ParCleanupCTTask cleanup_task(ct_bs, this);
if (ParallelGCThreads > 0) { if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(workers()->total_workers()); set_par_threads();
workers()->run_task(&cleanup_task); workers()->run_task(&cleanup_task);
set_par_threads(0); set_par_threads(0);
} else { } else {
...@@ -6136,8 +6140,9 @@ HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, ...@@ -6136,8 +6140,9 @@ HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
void G1CollectedHeap::set_par_threads() { void G1CollectedHeap::set_par_threads() {
// Don't change the number of workers. Use the value previously set // Don't change the number of workers. Use the value previously set
// in the workgroup. // in the workgroup.
assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
int n_workers = workers()->active_workers(); int n_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads || assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(), n_workers == workers()->total_workers(),
"Otherwise should be using the total number of workers"); "Otherwise should be using the total number of workers");
if (n_workers == 0) { if (n_workers == 0) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册