diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 0c4f448fd560eb81e55e48ba0aeafc17973f23e9..f023f932ab85a012c13a335c92ce1ee2e3f4c022 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4151,7 +4151,7 @@ void G1CollectedHeap::init_gc_alloc_regions() {
   }
 }

-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
   _survivor_gc_alloc_region.release();
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't
@@ -4161,8 +4161,8 @@ void G1CollectedHeap::release_gc_alloc_regions() {
   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();

   if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz();
-    _old_plab_stats.adjust_desired_plab_sz();
+    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
   }
 }

@@ -5427,7 +5427,7 @@ public:
 };

 // Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references() {
+void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   double ref_proc_start = os::elapsedTime();

   ReferenceProcessor* rp = _ref_processor_stw;
@@ -5454,15 +5454,14 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.

-  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                        workers()->active_workers() : 1);
-
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
-           active_workers == workers()->active_workers(),
-           "Need to reset active_workers");
+           no_of_gc_workers == workers()->active_workers(),
+           "Need to reset active GC workers");

-  set_par_threads(active_workers);
-  G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
+  set_par_threads(no_of_gc_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this,
+                                                 no_of_gc_workers,
+                                                 _task_queues);

   if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->run_task(&keep_cm_referents);
@@ -5528,10 +5527,10 @@
                                       NULL);
   } else {
     // Parallel reference processing
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
   }

@@ -5546,7 +5545,7 @@
 }

 // Weak Reference processing during an evacuation pause (part 2).
-void G1CollectedHeap::enqueue_discovered_references() {
+void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
   double ref_enq_start = os::elapsedTime();

   ReferenceProcessor* rp = _ref_processor_stw;
@@ -5560,13 +5559,12 @@
   } else {
     // Parallel reference enqueuing

-    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
-    assert(active_workers == workers()->active_workers(),
-           "Need to reset active_workers");
-    assert(rp->num_q() == active_workers, "sanity");
-    assert(active_workers <= rp->max_num_q(), "sanity");
+    assert(no_of_gc_workers == workers()->active_workers(),
+           "Need to reset active workers");
+    assert(rp->num_q() == no_of_gc_workers, "sanity");
+    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
     rp->enqueue_discovered_references(&par_task_executor);
   }

@@ -5658,7 +5656,7 @@ void G1CollectedHeap::evacuate_collection_set() {
   // as we may have to copy some 'reachable' referent
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
-  process_discovered_references();
+  process_discovered_references(n_workers);

   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
@@ -5670,7 +5668,7 @@
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }

-  release_gc_alloc_regions();
+  release_gc_alloc_regions(n_workers);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();

   concurrent_g1_refine()->clear_hot_cache();
@@ -5694,7 +5692,7 @@
   // will log these updates (and dirty their associated
   // cards). We need these updates logged to update any
   // RSets.
-  enqueue_discovered_references();
+  enqueue_discovered_references(n_workers);

   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 1a9a02896cb4fe766e9fe2f7d2e26ef19d39a352..816d4f253ac008a94af312dca955c9fcdfcce858 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -326,7 +326,7 @@ private:
   void init_gc_alloc_regions();

   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions();
+  void release_gc_alloc_regions(uint no_of_gc_workers);

   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
@@ -652,11 +652,11 @@ protected:

   // Process any reference objects discovered during
   // an incremental evacuation pause.
-  void process_discovered_references();
+  void process_discovered_references(uint no_of_gc_workers);

   // Enqueue any remaining discovered references
   // after processing.
-  void enqueue_discovered_references();
+  void enqueue_discovered_references(uint no_of_gc_workers);

 public:

diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index df4626136aa95880e3a8e33d4b1f8641e4dba2d0..ea432002a1e249c358fa1f34b271e0c9e4ca2462 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -1037,7 +1037,7 @@ void ParNewGeneration::collect(bool full,
   adjust_desired_tenuring_threshold();

   if (ResizePLAB) {
-    plab_stats()->adjust_desired_plab_sz();
+    plab_stats()->adjust_desired_plab_sz(n_workers);
   }

   if (PrintGC && !PrintGCDetails) {
diff --git a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
index d159dbe28fc3eeca66133df4cac850c09e061521..87f74484ab82f88122eb512788ee1de5be2fb153 100644
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
@@ -87,7 +87,7 @@ void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
 // Compute desired plab size and latch result for later
 // use. This should be called once at the end of parallel
 // scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz() {
+void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
   assert(ResizePLAB, "Not set");
   if (_allocated == 0) {
     assert(_unused == 0,
@@ -107,7 +107,7 @@
     target_refills = 1;
   }
   _used = _allocated - _wasted - _unused;
-  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
+  size_t plab_sz = _used/(target_refills*no_of_gc_workers);
   if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
   // Take historical weighted average
   _filter.sample(plab_sz);
diff --git a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
index cb35ee28f2ec07ab7e11762bc7e608b0e3f4c979..0666353aa59891cc1f5ee1c64c6a813254e57306 100644
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
@@ -204,7 +204,8 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
     return _desired_plab_sz;
   }

-  void adjust_desired_plab_sz(); // filter computation, latches output to
+  void adjust_desired_plab_sz(uint no_of_gc_workers);
+                                 // filter computation, latches output to
                                  // _desired_plab_sz, clears sensor accumulators
   void add_allocated(size_t v) {
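
Note (not part of the patch): the sizing rule that this patch parameterizes can be read in isolation as "desired PLAB size = words actually used by the last scavenge / (target refills * number of GC workers that ran), smoothed by a weighted average". The sketch below is a minimal, self-contained illustration of that rule under stated assumptions; it is NOT HotSpot code, and the class name SimplePlabStats, its fields, the target_refills parameter, and the smoothing constant are invented for the example.

// plab_sizing_sketch.cpp -- standalone sketch of the PLAB sizing rule.
// The point illustrated: the divisor is the number of GC workers that
// actually ran in the pause (passed in by the caller), not the static
// ParallelGCThreads setting.
#include <cstddef>
#include <cstdio>

class SimplePlabStats {
  std::size_t _allocated;   // words allocated in PLABs during the last scavenge
  std::size_t _wasted;      // words wasted when retiring partially filled PLABs
  std::size_t _unused;      // words left unused in PLABs at the end of the scavenge
  double      _desired_sz;  // smoothed desired PLAB size, in words
  double      _alpha;       // weight of the newest sample in the average

public:
  SimplePlabStats(std::size_t initial_sz, double alpha)
    : _allocated(0), _wasted(0), _unused(0),
      _desired_sz(static_cast<double>(initial_sz)), _alpha(alpha) {}

  void record(std::size_t allocated, std::size_t wasted, std::size_t unused) {
    _allocated = allocated;
    _wasted    = wasted;
    _unused    = unused;
  }

  // Rough analogue of PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers):
  // derive a per-worker PLAB size from what the last scavenge actually used,
  // spread over the workers that actually took part, then smooth it and clear
  // the sensors for the next scavenge.
  void adjust_desired_plab_sz(unsigned no_of_gc_workers, std::size_t target_refills) {
    if (no_of_gc_workers == 0 || _allocated == 0) {
      return;  // nothing useful was sampled in this scavenge
    }
    if (target_refills == 0) {
      target_refills = 1;
    }
    std::size_t used    = _allocated - _wasted - _unused;
    std::size_t plab_sz = used / (target_refills * no_of_gc_workers);
    // Historical weighted average, analogous in spirit to _filter.sample(plab_sz).
    _desired_sz = (1.0 - _alpha) * _desired_sz + _alpha * static_cast<double>(plab_sz);
    _allocated = _wasted = _unused = 0;
  }

  std::size_t desired_plab_sz() const { return static_cast<std::size_t>(_desired_sz); }
};

int main() {
  SimplePlabStats stats(4096 /* words */, 0.3);
  // Say only 8 workers actually ran in this pause, even though more are configured.
  stats.record(/* allocated */ 1u << 20, /* wasted */ 1u << 12, /* unused */ 1u << 14);
  stats.adjust_desired_plab_sz(/* no_of_gc_workers */ 8, /* target_refills */ 50);
  std::printf("desired PLAB size: %zu words\n", stats.desired_plab_sz());
  return 0;
}

The arithmetic shows why the parameter matters: when fewer workers run a pause than ParallelGCThreads is configured for, dividing by the configured thread count spreads the used space over workers that never ran and understates the PLAB size each active worker should get.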