diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 9358c0a24045b7e4b71939f96b2aa8387f8917fe..e5736743655924dcabf580e09e6dbcb5d1288001 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -4003,32 +4003,16 @@ void CMTask::drain_satb_buffers() {
 
   CMObjectClosure oc(this);
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    satb_mq_set.set_par_closure(_worker_id, &oc);
-  } else {
-    satb_mq_set.set_closure(&oc);
-  }
 
   // This keeps claiming and applying the closure to completed buffers
   // until we run out of buffers or we need to abort.
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    while (!has_aborted() &&
-           satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
-      if (_cm->verbose_medium()) {
-        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
-      }
-      statsOnly( ++_satb_buffers_processed );
-      regular_clock_call();
-    }
-  } else {
-    while (!has_aborted() &&
-           satb_mq_set.apply_closure_to_completed_buffer()) {
-      if (_cm->verbose_medium()) {
-        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
-      }
-      statsOnly( ++_satb_buffers_processed );
-      regular_clock_call();
+  while (!has_aborted() &&
+         satb_mq_set.apply_closure_to_completed_buffer(&oc)) {
+    if (_cm->verbose_medium()) {
+      gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
     }
+    statsOnly( ++_satb_buffers_processed );
+    regular_clock_call();
   }
 
   _draining_satb_buffers = false;
@@ -4037,12 +4021,6 @@ void CMTask::drain_satb_buffers() {
          concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    satb_mq_set.set_par_closure(_worker_id, NULL);
-  } else {
-    satb_mq_set.set_closure(NULL);
-  }
-
   // again, this was a potentially expensive operation, decrease the
   // limits to get the regular clock call early
   decrease_limits();
diff --git a/src/share/vm/gc_implementation/g1/satbQueue.cpp b/src/share/vm/gc_implementation/g1/satbQueue.cpp
index f3c2283b441f34c8c9c666cdb6625e7fe394cafb..62e77d22e4fe6df03ce9e31dfe0a8c306a623d10 100644
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp
@@ -227,7 +227,7 @@ void ObjPtrQueue::print(const char* name,
 #endif // _MSC_VER
 
 SATBMarkQueueSet::SATBMarkQueueSet() :
-  PtrQueueSet(), _closure(NULL), _par_closures(NULL),
+  PtrQueueSet(),
   _shared_satb_queue(this, true /*perm*/) { }
 
 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -235,9 +235,6 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                   Mutex* lock) {
   PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
   _shared_satb_queue.set_lock(lock);
-  if (ParallelGCThreads > 0) {
-    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
-  }
 }
 
 void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
@@ -300,17 +297,7 @@ void SATBMarkQueueSet::filter_thread_buffers() {
   shared_satb_queue()->filter();
 }
 
-void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
-  _closure = closure;
-}
-
-void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
-  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
-  _par_closures[i] = par_closure;
-}
-
-bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
-                                                              uint worker) {
+bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) {
   BufferNode* nd = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
@@ -322,7 +309,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
       if (_n_completed_buffers == 0) _process_completed = false;
     }
   }
-  ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
     ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
diff --git a/src/share/vm/gc_implementation/g1/satbQueue.hpp b/src/share/vm/gc_implementation/g1/satbQueue.hpp
index c882794f5d4366466becffbb33ec84532caf40d7..f98c3457948fceabeb107245317614f5e4430c29 100644
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp
@@ -75,16 +75,8 @@ public:
 };
 
 class SATBMarkQueueSet: public PtrQueueSet {
-  ObjectClosure* _closure;
-  ObjectClosure** _par_closures; // One per ParGCThread.
-
   ObjPtrQueue _shared_satb_queue;
 
-  // Utility function to support sequential and parallel versions. If
-  // "par" is true, then "worker" is the par thread id; if "false", worker
-  // is ignored.
-  bool apply_closure_to_completed_buffer_work(bool par, uint worker);
-
 #ifdef ASSERT
   void dump_active_states(bool expected_active);
   void verify_active_states(bool expected_active);
@@ -108,26 +100,10 @@ public:
   // Filter all the currently-active SATB buffers.
   void filter_thread_buffers();
 
-  // Register "blk" as "the closure" for all queues. Only one such closure
-  // is allowed. The "apply_closure_to_completed_buffer" method will apply
-  // this closure to a completed buffer, and "iterate_closure_all_threads"
-  // applies it to partially-filled buffers (the latter should only be done
-  // with the world stopped).
-  void set_closure(ObjectClosure* closure);
-  // Set the parallel closures: pointer is an array of pointers to
-  // closures, one for each parallel GC thread.
-  void set_par_closure(int i, ObjectClosure* closure);
-
   // If there exists some completed buffer, pop it, then apply the
-  // registered closure to all its elements, and return true. If no
+  // closure to all its elements, and return true. If no
   // completed buffers exist, return false.
-  bool apply_closure_to_completed_buffer() {
-    return apply_closure_to_completed_buffer_work(false, 0);
-  }
-  // Parallel version of the above.
-  bool par_apply_closure_to_completed_buffer(uint worker) {
-    return apply_closure_to_completed_buffer_work(true, worker);
-  }
+  bool apply_closure_to_completed_buffer(ObjectClosure* closure);
 
   // Apply the given closure on enqueued and currently-active buffers
   // respectively. Both methods are read-only, i.e., they do not
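Note on the shape of the change: instead of registering a closure on the queue set (set_closure / one set_par_closure slot per GC worker) and later clearing it, the caller now passes the closure straight to apply_closure_to_completed_buffer, which lets the serial and parallel branches in CMTask::drain_satb_buffers collapse into a single loop and removes the _closure/_par_closures state entirely. The following is a minimal standalone C++ sketch of that calling convention only; it is not HotSpot code. Closure, QueueSet and CountingClosure are hypothetical stand-ins for ObjectClosure, SATBMarkQueueSet and CMObjectClosure, and std::mutex stands in for the _cbl_mon lock used in the patch.

// Simplified model of "pass the closure per call" vs. "register it per worker".
#include <cstdio>
#include <deque>
#include <mutex>
#include <utility>
#include <vector>

struct Closure {                          // stand-in for ObjectClosure
  virtual void do_object(int obj) = 0;    // the real closure visits heap oops
  virtual ~Closure() {}
};

class QueueSet {                          // stand-in for SATBMarkQueueSet
  std::mutex _lock;                       // stand-in for _cbl_mon
  std::deque<std::vector<int> > _completed;  // completed "buffers"
public:
  void enqueue_completed(std::vector<int> buf) {
    std::lock_guard<std::mutex> x(_lock);
    _completed.push_back(std::move(buf));
  }
  // The closure is an argument, so any worker can call this concurrently
  // without any set_closure()/set_par_closure() bookkeeping.
  bool apply_closure_to_completed_buffer(Closure* cl) {
    std::vector<int> buf;
    {
      std::lock_guard<std::mutex> x(_lock);  // pop one buffer under the lock
      if (_completed.empty()) return false;
      buf = std::move(_completed.front());
      _completed.pop_front();
    }
    for (size_t i = 0; i < buf.size(); ++i) {
      cl->do_object(buf[i]);                 // apply outside the lock
    }
    return true;
  }
};

struct CountingClosure : Closure {        // per-task closure, like CMObjectClosure
  int visited;
  CountingClosure() : visited(0) {}
  virtual void do_object(int) { ++visited; }
};

int main() {
  QueueSet set;
  std::vector<int> a; a.push_back(1); a.push_back(2); a.push_back(3);
  std::vector<int> b; b.push_back(4); b.push_back(5);
  set.enqueue_completed(a);
  set.enqueue_completed(b);

  CountingClosure cl;
  // Mirrors the new drain loop in CMTask::drain_satb_buffers().
  while (set.apply_closure_to_completed_buffer(&cl)) { /* keep draining */ }
  std::printf("visited %d objects\n", cl.visited);
  return 0;
}

Because the closure travels with the call, there is no shared mutable closure state to set up and tear down, which is why the precondition assert in set_par_closure and the NULL-resetting block at the end of drain_satb_buffers could simply be deleted in the patch above.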