From f7cab4a66d7a70dbd91c55b9a554f89c482ac5e6 Mon Sep 17 00:00:00 2001 From: tonyp Date: Thu, 5 Apr 2012 13:57:23 -0400 Subject: [PATCH] 7127697: G1: remove dead code after recent concurrent mark changes Summary: Removed lots of dead code after some recent conc mark changes. Reviewed-by: brutisso, johnc --- .../gc_implementation/g1/concurrentMark.cpp | 916 +----------------- .../gc_implementation/g1/concurrentMark.hpp | 242 +---- .../gc_implementation/g1/g1CollectedHeap.hpp | 196 ---- .../g1/g1CollectorPolicy.cpp | 71 +- .../g1/g1CollectorPolicy.hpp | 14 - .../vm/gc_implementation/g1/g1_globals.hpp | 3 - .../vm/gc_implementation/g1/heapRegion.hpp | 7 +- src/share/vm/utilities/bitMap.cpp | 60 +- src/share/vm/utilities/bitMap.hpp | 27 +- 9 files changed, 36 insertions(+), 1500 deletions(-) diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 001f94e16..015d99dd5 100644 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -103,16 +103,6 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { return (int) (diff >> _shifter); } -void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap, - size_t from_start_index, - HeapWord* to_start_word, - size_t word_num) { - _bm.mostly_disjoint_range_union(from_bitmap, - from_start_index, - heapWordToOffset(to_start_word), - word_num); -} - #ifndef PRODUCT bool CMBitMapRO::covers(ReservedSpace rs) const { // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); @@ -271,140 +261,6 @@ bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { } } -CMRegionStack::CMRegionStack() : _base(NULL) {} - -void CMRegionStack::allocate(size_t size) { - _base = NEW_C_HEAP_ARRAY(MemRegion, size); - if (_base == NULL) { - vm_exit_during_initialization("Failed to allocate CM region mark stack"); - } - _index = 0; - _capacity = (jint) size; -} - -CMRegionStack::~CMRegionStack() { - if (_base != NULL) { - FREE_C_HEAP_ARRAY(oop, _base); - } -} - -void CMRegionStack::push_lock_free(MemRegion mr) { - guarantee(false, "push_lock_free(): don't call this any more"); - - assert(mr.word_size() > 0, "Precondition"); - while (true) { - jint index = _index; - - if (index >= _capacity) { - _overflow = true; - return; - } - // Otherwise... - jint next_index = index+1; - jint res = Atomic::cmpxchg(next_index, &_index, index); - if (res == index) { - _base[index] = mr; - return; - } - // Otherwise, we need to try again. - } -} - -// Lock-free pop of the region stack. Called during the concurrent -// marking / remark phases. Should only be called in tandem with -// other lock-free pops. -MemRegion CMRegionStack::pop_lock_free() { - guarantee(false, "pop_lock_free(): don't call this any more"); - - while (true) { - jint index = _index; - - if (index == 0) { - return MemRegion(); - } - // Otherwise... - jint next_index = index-1; - jint res = Atomic::cmpxchg(next_index, &_index, index); - if (res == index) { - MemRegion mr = _base[next_index]; - if (mr.start() != NULL) { - assert(mr.end() != NULL, "invariant"); - assert(mr.word_size() > 0, "invariant"); - return mr; - } else { - // that entry was invalidated... let's skip it - assert(mr.end() == NULL, "invariant"); - } - } - // Otherwise, we need to try again. - } -} - -#if 0 -// The routines that manipulate the region stack with a lock are -// not currently used. They should be retained, however, as a -// diagnostic aid. 
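The push_lock_free() and pop_lock_free() bodies deleted above are a textbook compare-and-swap reservation loop: a thread claims a slot by CAS-ing the shared index, and only the winner of the race touches the slot it claimed. Here is a minimal standalone sketch of that pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg; the type and all names below are illustrative, not HotSpot APIs:

```cpp
#include <atomic>

// Sketch of CMRegionStack::push_lock_free()'s CAS loop: claim slot 'index'
// by advancing the shared index, then fill the slot. Assumes, as the removed
// comments require, that pushes never run concurrently with pops.
template <typename T, int Capacity>
struct LockFreePushStack {
  T _base[Capacity];
  std::atomic<int> _index{0};
  bool _overflow{false};

  void push(const T& e) {
    while (true) {
      int index = _index.load(std::memory_order_relaxed);
      if (index >= Capacity) {
        _overflow = true;   // mirrors CMRegionStack's overflow flag
        return;
      }
      // Equivalent of Atomic::cmpxchg(next_index, &_index, index):
      // succeeds only if _index still equals 'index'.
      if (_index.compare_exchange_strong(index, index + 1)) {
        _base[index] = e;   // we own slot 'index' exclusively now
        return;
      }
      // Lost the race to another pusher; retry with the updated index.
    }
  }
};
```

This also shows why pushes and pops must not interleave here: the index is published before the slot contents are written, so a concurrent pop could observe a claimed but not-yet-written entry.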
-
-void CMRegionStack::push_with_lock(MemRegion mr) {
- assert(mr.word_size() > 0, "Precondition");
- MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
-
- if (isFull()) {
- _overflow = true;
- return;
- }
-
- _base[_index] = mr;
- _index += 1;
-}
-
-MemRegion CMRegionStack::pop_with_lock() {
- MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
-
- while (true) {
- if (_index == 0) {
- return MemRegion();
- }
- _index -= 1;
-
- MemRegion mr = _base[_index];
- if (mr.start() != NULL) {
- assert(mr.end() != NULL, "invariant");
- assert(mr.word_size() > 0, "invariant");
- return mr;
- } else {
- // that entry was invalidated... let's skip it
- assert(mr.end() == NULL, "invariant");
- }
- }
-}
-#endif
-
-bool CMRegionStack::invalidate_entries_into_cset() {
- guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
-
- bool result = false;
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- for (int i = 0; i < _oops_do_bound; ++i) {
- MemRegion mr = _base[i];
- if (mr.start() != NULL) {
- assert(mr.end() != NULL, "invariant");
- assert(mr.word_size() > 0, "invariant");
- HeapRegion* hr = g1h->heap_region_containing(mr.start());
- assert(hr != NULL, "invariant");
- if (hr->in_collection_set()) {
- // The region points into the collection set
- _base[i] = MemRegion();
- result = true;
- }
- } else {
- // that entry was invalidated... let's skip it
- assert(mr.end() == NULL, "invariant");
- }
- }
- return result;
-}
-
 template <class OopClosureClass>
 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
@@ -565,10 +421,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 
 _prevMarkBitMap(&_markBitMap1),
 _nextMarkBitMap(&_markBitMap2),
- _at_least_one_mark_complete(false),
 
 _markStack(this),
- _regionStack(),
 // _finger set in set_non_marking_state
 
 _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
@@ -582,7 +436,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 _has_aborted(false),
 _restart_for_overflow(false),
 _concurrent_marking_in_progress(false),
- _should_gray_objects(false),
 
 // _verbose_level set below
 
@@ -611,7 +464,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 }
 
 _markStack.allocate(MarkStackSize);
- _regionStack.allocate(G1MarkRegionStackSize);
 
 // Create & start a ConcurrentMark thread.
 _cmThread = new ConcurrentMarkThread(this);
@@ -744,15 +596,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 
 void ConcurrentMark::update_g1_committed(bool force) {
 // If concurrent marking is not in progress, then we do not need to
- // update _heap_end. This has a subtle and important
- // side-effect. Imagine that two evacuation pauses happen between
- // marking completion and remark. The first one can grow the
- // heap (hence now the finger is below the heap end). Then, the
- // second one could unnecessarily push regions on the region
- // stack. This causes the invariant that the region stack is empty
- // at the beginning of remark to be false. By ensuring that we do
- // not observe heap expansions after marking is complete, then we do
- // not have this problem.
+ // update _heap_end. 
if (!concurrent_marking_in_progress() && !force) return;
 
 MemRegion committed = _g1h->g1_committed();
@@ -1058,86 +902,6 @@ bool ForceOverflowSettings::should_force() {
 }
 #endif // !PRODUCT
 
-void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
- guarantee(false, "grayRegionIfNecessary(): don't call this any more");
-
- // The objects on the region have already been marked "in bulk" by
- // the caller. We only need to decide whether to push the region on
- // the region stack or not.
-
- if (!concurrent_marking_in_progress() || !_should_gray_objects) {
- // We're done with marking and waiting for remark. We do not need to
- // push anything else on the region stack.
- return;
- }
-
- HeapWord* finger = _finger;
-
- if (verbose_low()) {
- gclog_or_tty->print_cr("[global] attempting to push "
- "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
- PTR_FORMAT, mr.start(), mr.end(), finger);
- }
-
- if (mr.start() < finger) {
- // The finger is always heap region aligned and it is not possible
- // for mr to span heap regions.
- assert(mr.end() <= finger, "invariant");
-
- // Separated the asserts so that we know which one fires.
- assert(mr.start() <= mr.end(),
- "region boundaries should fall within the committed space");
- assert(_heap_start <= mr.start(),
- "region boundaries should fall within the committed space");
- assert(mr.end() <= _heap_end,
- "region boundaries should fall within the committed space");
- if (verbose_low()) {
- gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
- "below the finger, pushing it",
- mr.start(), mr.end());
- }
-
- if (!region_stack_push_lock_free(mr)) {
- if (verbose_low()) {
- gclog_or_tty->print_cr("[global] region stack has overflown.");
- }
- }
- }
-}
-
-void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
- guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
-
- // The object is not marked by the caller. We need to at least mark
- // it and maybe push it on the stack.
-
- HeapWord* addr = (HeapWord*)p;
- if (!_nextMarkBitMap->isMarked(addr)) {
- // We definitely need to mark it, irrespective of whether we bail out
- // because we're done with marking.
- if (_nextMarkBitMap->parMark(addr)) {
- if (!concurrent_marking_in_progress() || !_should_gray_objects) {
- // If we're done with concurrent marking and we're waiting for
- // remark, then we're not pushing anything on the stack.
- return;
- }
-
- // No OrderAccess:store_load() is needed. It is implicit in the
- // CAS done in parMark(addr) above
- HeapWord* finger = _finger;
-
- if (addr < finger) {
- if (!mark_stack_push(oop(addr))) {
- if (verbose_low()) {
- gclog_or_tty->print_cr("[global] global stack overflow "
- "during parMark");
- }
- }
- }
- }
- }
-}
-
 class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
 ConcurrentMark* _cm;
@@ -2255,7 +2019,6 @@ void ConcurrentMark::cleanup() {
 g1p->set_known_garbage_bytes(known_garbage_bytes);
 
 size_t start_used_bytes = g1h->used();
- _at_least_one_mark_complete = true;
 g1h->set_marking_complete();
 
 ergo_verbose4(ErgoConcCycles,
@@ -3066,89 +2829,6 @@ void ConcurrentMark::print_reachable(const char* str,
 
 #endif // PRODUCT
 
-// This note is for drainAllSATBBuffers and the code in between.
-// In the future we could reuse a task to do this work during an
-// evacuation pause (since now tasks are not active and can be claimed
-// during an evacuation pause). This was a late change to the code and
-// is currently not being taken advantage of. 
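The markAndGrayObjectIfNecessary() body above captures the gray-object protocol that survives elsewhere in this file: CAS-mark the bitmap first, let only the winning thread push, and push only when the object lies below the global finger, since the bitmap scan will still reach everything at or above the finger. A compact sketch of that protocol, with simplified stand-in types rather than HotSpot's:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for the parMark-then-push protocol: the bitmap CAS
// elects exactly one marker, and only objects behind the finger need an
// explicit push, because the bitmap iteration will still visit everything
// ahead of the finger. None of these types are HotSpot's.
struct MiniMarker {
  std::atomic<uint8_t>* _mark_bits;  // one flag per object slot (simplified)
  std::atomic<size_t>   _finger{0};  // scanning is complete below this index

  bool par_mark(size_t obj_index) {
    uint8_t expected = 0;
    return _mark_bits[obj_index].compare_exchange_strong(expected, 1);
  }

  template <typename PushFn>
  void mark_and_maybe_push(size_t obj_index, PushFn push) {
    if (!par_mark(obj_index)) {
      return;                        // somebody else marked it first
    }
    if (obj_index < _finger.load()) {
      push(obj_index);               // behind the finger: gray it explicitly
    }
    // At or above the finger: the bitmap iteration will visit it anyway.
  }
};
```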
- -void ConcurrentMark::deal_with_reference(oop obj) { - if (verbose_high()) { - gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, - (void*) obj); - } - - HeapWord* objAddr = (HeapWord*) obj; - assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); - if (_g1h->is_in_g1_reserved(objAddr)) { - assert(obj != NULL, "null check is implicit"); - if (!_nextMarkBitMap->isMarked(objAddr)) { - // Only get the containing region if the object is not marked on the - // bitmap (otherwise, it's a waste of time since we won't do - // anything with it). - HeapRegion* hr = _g1h->heap_region_containing_raw(obj); - if (!hr->obj_allocated_since_next_marking(obj)) { - if (verbose_high()) { - gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " - "marked", (void*) obj); - } - - // we need to mark it first - if (_nextMarkBitMap->parMark(objAddr)) { - // No OrderAccess:store_load() is needed. It is implicit in the - // CAS done in parMark(objAddr) above - HeapWord* finger = _finger; - if (objAddr < finger) { - if (verbose_high()) { - gclog_or_tty->print_cr("[global] below the global finger " - "("PTR_FORMAT"), pushing it", finger); - } - if (!mark_stack_push(obj)) { - if (verbose_low()) { - gclog_or_tty->print_cr("[global] global stack overflow during " - "deal_with_reference"); - } - } - } - } - } - } - } -} - -class CMGlobalObjectClosure : public ObjectClosure { -private: - ConcurrentMark* _cm; - -public: - void do_object(oop obj) { - _cm->deal_with_reference(obj); - } - - CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } -}; - -void ConcurrentMark::drainAllSATBBuffers() { - guarantee(false, "drainAllSATBBuffers(): don't call this any more"); - - CMGlobalObjectClosure oc(this); - SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); - satb_mq_set.set_closure(&oc); - - while (satb_mq_set.apply_closure_to_completed_buffer()) { - if (verbose_medium()) { - gclog_or_tty->print_cr("[global] processed an SATB buffer"); - } - } - - // no need to check whether we should do this, as this is only - // called during an evacuation pause - satb_mq_set.iterate_closure_all_threads(); - - satb_mq_set.set_closure(NULL); - assert(satb_mq_set.completed_buffers_num() == 0, "invariant"); -} - void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { // Note we are overriding the read-only view of the prev map here, via // the cast. 
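The drainAllSATBBuffers() removal above retires the pause-time SATB path, whose shape is worth recording: install a closure on the queue set, apply it to every completed buffer, then sweep each thread's partially filled buffer, the latter being safe only inside a pause. A simplified stand-in for that shape (plain C++, not HotSpot's SATBMarkQueueSet API):

```cpp
#include <deque>
#include <functional>
#include <vector>

// Simplified sketch of the removed drainAllSATBBuffers() protocol. A real
// SATB queue set hands out buffers per mutator thread; here both the
// completed buffers and the in-progress per-thread buffers are plain
// containers, and the "closure" is a std::function.
struct SimpleSATBQueueSet {
  std::deque<std::vector<void*>> _completed;     // full, enqueued buffers
  std::vector<std::vector<void*>*> _per_thread;  // partially filled buffers

  // Returns true if a completed buffer was claimed and processed.
  bool apply_closure_to_completed_buffer(const std::function<void(void*)>& cl) {
    if (_completed.empty()) return false;
    std::vector<void*> buf = std::move(_completed.front());
    _completed.pop_front();
    for (void* obj : buf) cl(obj);
    return true;
  }

  void drain_all(const std::function<void(void*)>& cl) {
    while (apply_closure_to_completed_buffer(cl)) { /* keep draining */ }
    // Safe only at a safepoint/pause: also sweep the in-progress buffers,
    // mirroring iterate_closure_all_threads().
    for (auto* buf : _per_thread) {
      for (void* obj : *buf) cl(obj);
      buf->clear();
    }
  }
};
```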
@@ -3257,63 +2937,6 @@ ConcurrentMark::claim_region(int task_num) {
 return NULL;
 }
 
-bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
- guarantee(false, "invalidate_aborted_regions_in_cset(): "
- "don't call this any more");
-
- bool result = false;
- for (int i = 0; i < (int)_max_task_num; ++i) {
- CMTask* the_task = _tasks[i];
- MemRegion mr = the_task->aborted_region();
- if (mr.start() != NULL) {
- assert(mr.end() != NULL, "invariant");
- assert(mr.word_size() > 0, "invariant");
- HeapRegion* hr = _g1h->heap_region_containing(mr.start());
- assert(hr != NULL, "invariant");
- if (hr->in_collection_set()) {
- // The region points into the collection set
- the_task->set_aborted_region(MemRegion());
- result = true;
- }
- }
- }
- return result;
-}
-
-bool ConcurrentMark::has_aborted_regions() {
- for (int i = 0; i < (int)_max_task_num; ++i) {
- CMTask* the_task = _tasks[i];
- MemRegion mr = the_task->aborted_region();
- if (mr.start() != NULL) {
- assert(mr.end() != NULL, "invariant");
- assert(mr.word_size() > 0, "invariant");
- return true;
- }
- }
- return false;
-}
-
-void ConcurrentMark::oops_do(OopClosure* cl) {
- if (_markStack.size() > 0 && verbose_low()) {
- gclog_or_tty->print_cr("[global] scanning the global marking stack, "
- "size = %d", _markStack.size());
- }
- // we first iterate over the contents of the mark stack...
- _markStack.oops_do(cl);
-
- for (int i = 0; i < (int)_max_task_num; ++i) {
- OopTaskQueue* queue = _task_queues->queue((int)i);
-
- if (queue->size() > 0 && verbose_low()) {
- gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
- "size = %d", i, queue->size());
- }
-
- // ...then over the contents of all the task queues.
- queue->oops_do(cl);
- }
-}
-
 #ifndef PRODUCT
 enum VerifyNoCSetOopsPhase {
 VerifyNoCSetOopsStack,
@@ -3445,8 +3068,6 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
 _markStack.setEmpty();
 _markStack.clear_overflow();
- _regionStack.setEmpty();
- _regionStack.clear_overflow();
 if (clear_overflow) {
 clear_has_overflown();
 } else {
@@ -3457,8 +3078,6 @@ void ConcurrentMark::clear_marking_state(bool clear_overflow) {
 for (int i = 0; i < (int)_max_task_num; ++i) {
 OopTaskQueue* queue = _task_queues->queue(i);
 queue->set_empty();
- // Clear any partial regions from the CMTasks
- _tasks[i]->clear_aborted_region();
 }
 }
 
@@ -3658,327 +3277,6 @@ void ConcurrentMark::print_stats() {
 }
 }
 
-// Closures used by ConcurrentMark::complete_marking_in_collection_set(). 
-
-class CSetMarkOopClosure: public OopClosure {
- friend class CSetMarkBitMapClosure;
-
- G1CollectedHeap* _g1h;
- CMBitMap* _bm;
- ConcurrentMark* _cm;
- oop* _ms;
- jint* _array_ind_stack;
- int _ms_size;
- int _ms_ind;
- int _array_increment;
- uint _worker_id;
-
- bool push(oop obj, int arr_ind = 0) {
- if (_ms_ind == _ms_size) {
- gclog_or_tty->print_cr("Mark stack is full.");
- return false;
- }
- _ms[_ms_ind] = obj;
- if (obj->is_objArray()) {
- _array_ind_stack[_ms_ind] = arr_ind;
- }
- _ms_ind++;
- return true;
- }
-
- oop pop() {
- if (_ms_ind == 0) {
- return NULL;
- } else {
- _ms_ind--;
- return _ms[_ms_ind];
- }
- }
-
- template <class T> bool drain() {
- while (_ms_ind > 0) {
- oop obj = pop();
- assert(obj != NULL, "Since index was non-zero.");
- if (obj->is_objArray()) {
- jint arr_ind = _array_ind_stack[_ms_ind];
- objArrayOop aobj = objArrayOop(obj);
- jint len = aobj->length();
- jint next_arr_ind = arr_ind + _array_increment;
- if (next_arr_ind < len) {
- push(obj, next_arr_ind);
- }
- // Now process this portion of this one.
- int lim = MIN2(next_arr_ind, len);
- for (int j = arr_ind; j < lim; j++) {
- do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
- }
- } else {
- obj->oop_iterate(this);
- }
- if (abort()) return false;
- }
- return true;
- }
-
-public:
- CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
- _g1h(G1CollectedHeap::heap()),
- _cm(cm),
- _bm(cm->nextMarkBitMap()),
- _ms_size(ms_size), _ms_ind(0),
- _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
- _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
- _array_increment(MAX2(ms_size/8, 16)),
- _worker_id(worker_id) { }
-
- ~CSetMarkOopClosure() {
- FREE_C_HEAP_ARRAY(oop, _ms);
- FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
- }
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop( oop* p) { do_oop_work(p); }
-
- template <class T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (oopDesc::is_null(heap_oop)) return;
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (obj->is_forwarded()) {
- // If the object has already been forwarded, we have to make sure
- // that it's marked. So follow the forwarding pointer. Note that
- // this does the right thing for self-forwarding pointers in the
- // evacuation failure case. 
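The drain() template above uses a technique that outlives this closure: object arrays are scanned _array_increment elements per pop, with the array re-pushed under a saved continuation index before the slice is scanned, so one huge objArray cannot overflow the mark stack. A plain-C++ sketch of that chunking, with stand-in types rather than HotSpot's:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for the (object, array-index) pairs CSetMarkOopClosure keeps in
// _ms / _array_ind_stack: an array plus the index to resume scanning from.
struct ArrayWork { const std::vector<int>* arr; int start; };

void drain(std::vector<ArrayWork>& stack, int increment) {
  while (!stack.empty()) {
    ArrayWork w = stack.back();
    stack.pop_back();
    int len  = (int)w.arr->size();
    int next = w.start + increment;
    if (next < len) {
      // Re-push the continuation BEFORE scanning the slice, as drain()
      // does, so the remainder is never lost if we stop early.
      stack.push_back(ArrayWork{w.arr, next});
    }
    int lim = std::min(next, len);
    for (int j = w.start; j < lim; j++) {
      std::printf("visit element %d\n", (*w.arr)[j]); // stand-in for do_oop()
    }
  }
}
```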
- obj = obj->forwardee();
- }
- HeapRegion* hr = _g1h->heap_region_containing(obj);
- if (hr != NULL) {
- if (hr->in_collection_set()) {
- if (_g1h->is_obj_ill(obj)) {
- if (_bm->parMark((HeapWord*)obj)) {
- if (!push(obj)) {
- gclog_or_tty->print_cr("Setting abort in CSetMarkOopClosure because push failed.");
- set_abort();
- }
- }
- }
- } else {
- // Outside the collection set; we need to gray it
- _cm->deal_with_reference(obj);
- }
- }
- }
-};
-
-class CSetMarkBitMapClosure: public BitMapClosure {
- G1CollectedHeap* _g1h;
- CMBitMap* _bitMap;
- ConcurrentMark* _cm;
- CSetMarkOopClosure _oop_cl;
- uint _worker_id;
-
-public:
- CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
- _g1h(G1CollectedHeap::heap()),
- _bitMap(cm->nextMarkBitMap()),
- _oop_cl(cm, ms_size, worker_id),
- _worker_id(worker_id) { }
-
- bool do_bit(size_t offset) {
- // convert offset into a HeapWord*
- HeapWord* addr = _bitMap->offsetToHeapWord(offset);
- assert(_bitMap->startWord() <= addr && addr < _bitMap->endWord(),
- "address out of range");
- assert(_bitMap->isMarked(addr), "tautology");
- oop obj = oop(addr);
- if (!obj->is_forwarded()) {
- if (!_oop_cl.push(obj)) return false;
- if (UseCompressedOops) {
- if (!_oop_cl.drain<narrowOop>()) return false;
- } else {
- if (!_oop_cl.drain<oop>()) return false;
- }
- }
- // Otherwise...
- return true;
- }
-};
-
-class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
- CMBitMap* _bm;
- CSetMarkBitMapClosure _bit_cl;
- uint _worker_id;
-
- enum SomePrivateConstants {
- MSSize = 1000
- };
-
-public:
- CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
- _bm(cm->nextMarkBitMap()),
- _bit_cl(cm, MSSize, worker_id),
- _worker_id(worker_id) { }
-
- bool doHeapRegion(HeapRegion* hr) {
- if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
- // The current worker has successfully claimed the region. 
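The claimHeapRegion() call above is G1's claim-value protocol: every worker walks the same region list, and a region is processed only by the worker whose CAS on the region's claim word succeeds. A simplified sketch of the idea (illustrative types; real claim words cycle through several named claim values rather than a single flag):

```cpp
#include <atomic>
#include <vector>

// Stand-in for a HeapRegion's claim word. InitialClaimValue is modeled as 0;
// a successful CAS to the pause's claim value makes this worker the owner.
struct Region {
  std::atomic<int> _claim{0};
  bool claim(int claim_value) {
    int expected = 0;
    return _claim.compare_exchange_strong(expected, claim_value);
  }
};

void worker_scan(std::vector<Region>& cset, int claim_value) {
  for (Region& r : cset) {
    if (r.claim(claim_value)) {
      // This worker owns the region; process it here. Other workers see the
      // new claim value and skip it. After the pause the claim words are
      // reset, mirroring reset_cset_heap_region_claim_values().
    }
  }
}
```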
- if (!hr->evacuation_failed()) {
- MemRegion mr = MemRegion(hr->bottom(), hr->next_top_at_mark_start());
- if (!mr.is_empty()) {
- bool done = false;
- while (!done) {
- done = _bm->iterate(&_bit_cl, mr);
- }
- }
- }
- }
- return false;
- }
-};
-
-class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
-protected:
- G1CollectedHeap* _g1h;
- ConcurrentMark* _cm;
-
-public:
- G1ParCompleteMarkInCSetTask(G1CollectedHeap* g1h,
- ConcurrentMark* cm) :
- AbstractGangTask("Complete Mark in CSet"),
- _g1h(g1h), _cm(cm) { }
-
- void work(uint worker_id) {
- CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
- HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
- _g1h->collection_set_iterate_from(hr, &cmplt);
- }
-};
-
-void ConcurrentMark::complete_marking_in_collection_set() {
- guarantee(false, "complete_marking_in_collection_set(): "
- "don't call this any more");
-
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
- if (!g1h->mark_in_progress()) {
- g1h->g1_policy()->record_mark_closure_time(0.0);
- return;
- }
-
- double start = os::elapsedTime();
- G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
-
- assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- int n_workers = g1h->workers()->active_workers();
- g1h->set_par_threads(n_workers);
- g1h->workers()->run_task(&complete_mark_task);
- g1h->set_par_threads(0);
- } else {
- complete_mark_task.work(0);
- }
-
- assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
-
- // Reset the claim values in the regions in the collection set.
- g1h->reset_cset_heap_region_claim_values();
-
- assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-
- double end_time = os::elapsedTime();
- double elapsed_time_ms = (end_time - start) * 1000.0;
- g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
-}
-
-// The next two methods deal with the following optimisation. Some
-// objects are gray by being marked and located above the finger. If
-// they are copied, during an evacuation pause, below the finger then
-// they need to be pushed on the stack. The observation is that, if
-// there are no regions in the collection set located above the
-// finger, then the above cannot happen, hence we do not need to
-// explicitly gray any objects when copying them to below the
-// finger. The global stack will be scanned to ensure that, if it
-// points to objects being copied, it will update their
-// location. There is a tricky situation with the gray objects in
-// the region stack that are being copied, however. See the comment in
-// newCSet().
-
-void ConcurrentMark::newCSet() {
- guarantee(false, "newCSet(): don't call this any more");
-
- if (!concurrent_marking_in_progress()) {
- // nothing to do if marking is not in progress
- return;
- }
-
- // find what the lowest finger is among the global and local fingers
- _min_finger = _finger;
- for (int i = 0; i < (int)_max_task_num; ++i) {
- CMTask* task = _tasks[i];
- HeapWord* task_finger = task->finger();
- if (task_finger != NULL && task_finger < _min_finger) {
- _min_finger = task_finger;
- }
- }
-
- _should_gray_objects = false;
-
- // This fixes a very subtle and frustrating bug. It might be the case
- // that, during an evacuation pause, heap regions that contain
- // objects that are gray (by being in regions contained in the
- // region stack) are included in the collection set. 
Since such gray
- // objects will be moved, and because it's not easy to redirect
- // region stack entries to point to a new location (because objects
- // in one region might be scattered to multiple regions after they
- // are copied), one option is to ensure that all marked objects
- // copied during a pause are pushed on the stack. Notice, however,
- // that this problem can only happen when the region stack is not
- // empty during an evacuation pause. So, we make the fix a bit less
- // conservative and ensure that regions are pushed on the stack,
- // irrespective of whether all collection set regions are below the
- // finger, if the region stack is not empty. This is expected to be
- // a rare case, so I don't think it's necessary to be smarter about it.
- if (!region_stack_empty() || has_aborted_regions()) {
- _should_gray_objects = true;
- }
-}
-
-void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
- guarantee(false, "registerCSetRegion(): don't call this any more");
-
- if (!concurrent_marking_in_progress()) return;
-
- HeapWord* region_end = hr->end();
- if (region_end > _min_finger) {
- _should_gray_objects = true;
- }
-}
-
-// Resets the region fields of active CMTasks whose values point
-// into the collection set.
-void ConcurrentMark::reset_active_task_region_fields_in_cset() {
- guarantee(false, "reset_active_task_region_fields_in_cset(): "
- "don't call this any more");
-
- assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
- assert(parallel_marking_threads() <= _max_task_num, "sanity");
-
- for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
- CMTask* task = _tasks[i];
- HeapWord* task_finger = task->finger();
- if (task_finger != NULL) {
- assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
- HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
- if (finger_region->in_collection_set()) {
- // The task's current region is in the collection set.
- // This region will be evacuated in the current GC and
- // the region fields in the task will be stale.
- task->giveup_current_region();
- }
- }
- }
-}
-
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
 // Clear all marks to force marking thread to do nothing
@@ -4112,36 +3410,21 @@
 private:
 CMBitMap* _nextMarkBitMap;
 ConcurrentMark* _cm;
 CMTask* _task;
- // true if we're scanning a heap region claimed by the task (so that
- // we move the finger along), false if we're not, i.e. currently when
- // scanning a heap region popped from the region stack (so that we
- // do not move the task finger along; it'd be a mistake if we did so).
- bool _scanning_heap_region;
 
 public:
- CMBitMapClosure(CMTask *task,
- ConcurrentMark* cm,
- CMBitMap* nextMarkBitMap)
- : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
-
- void set_scanning_heap_region(bool scanning_heap_region) {
- _scanning_heap_region = scanning_heap_region;
- }
+ CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
+ _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
 
 bool do_bit(size_t offset) {
 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
 assert(_nextMarkBitMap->isMarked(addr), "invariant");
 assert( addr < _cm->finger(), "invariant");
 
- if (_scanning_heap_region) {
- statsOnly( _task->increase_objs_found_on_bitmap() );
- assert(addr >= _task->finger(), "invariant");
- // We move that task's local finger along.
- _task->move_finger_to(addr);
- } else {
- // We move the task's region finger along. 
- _task->move_region_finger_to(addr); - } + statsOnly( _task->increase_objs_found_on_bitmap() ); + assert(addr >= _task->finger(), "invariant"); + + // We move that task's local finger along. + _task->move_finger_to(addr); _task->scan_object(oop(addr)); // we only partially drain the local queue and global stack @@ -4249,8 +3532,6 @@ void CMTask::clear_region_fields() { _curr_region = NULL; _finger = NULL; _region_limit = NULL; - - _region_finger = NULL; } void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { @@ -4271,7 +3552,6 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) { _nextMarkBitMap = nextMarkBitMap; clear_region_fields(); - assert(_aborted_region.is_empty(), "should have been cleared"); _calls = 0; _elapsed_time_ms = 0.0; @@ -4288,7 +3568,6 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) { _global_max_size = 0; _global_transfers_to = 0; _global_transfers_from = 0; - _region_stack_pops = 0; _regions_claimed = 0; _objs_found_on_bitmap = 0; _satb_buffers_processed = 0; @@ -4663,110 +3942,6 @@ void CMTask::drain_satb_buffers() { decrease_limits(); } -void CMTask::drain_region_stack(BitMapClosure* bc) { - assert(_cm->region_stack_empty(), "region stack should be empty"); - assert(_aborted_region.is_empty(), "aborted region should be empty"); - return; - - if (has_aborted()) return; - - assert(_region_finger == NULL, - "it should be NULL when we're not scanning a region"); - - if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) { - if (_cm->verbose_low()) { - gclog_or_tty->print_cr("[%d] draining region stack, size = %d", - _task_id, _cm->region_stack_size()); - } - - MemRegion mr; - - if (!_aborted_region.is_empty()) { - mr = _aborted_region; - _aborted_region = MemRegion(); - - if (_cm->verbose_low()) { - gclog_or_tty->print_cr("[%d] scanning aborted region " - "[ " PTR_FORMAT ", " PTR_FORMAT " )", - _task_id, mr.start(), mr.end()); - } - } else { - mr = _cm->region_stack_pop_lock_free(); - // it returns MemRegion() if the pop fails - statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); - } - - while (mr.start() != NULL) { - if (_cm->verbose_medium()) { - gclog_or_tty->print_cr("[%d] we are scanning region " - "["PTR_FORMAT", "PTR_FORMAT")", - _task_id, mr.start(), mr.end()); - } - - assert(mr.end() <= _cm->finger(), - "otherwise the region shouldn't be on the stack"); - assert(!mr.is_empty(), "Only non-empty regions live on the region stack"); - if (_nextMarkBitMap->iterate(bc, mr)) { - assert(!has_aborted(), - "cannot abort the task without aborting the bitmap iteration"); - - // We finished iterating over the region without aborting. - regular_clock_call(); - if (has_aborted()) { - mr = MemRegion(); - } else { - mr = _cm->region_stack_pop_lock_free(); - // it returns MemRegion() if the pop fails - statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); - } - } else { - assert(has_aborted(), "currently the only way to do so"); - - // The only way to abort the bitmap iteration is to return - // false from the do_bit() method. However, inside the - // do_bit() method we move the _region_finger to point to the - // object currently being looked at. So, if we bail out, we - // have definitely set _region_finger to something non-null. - assert(_region_finger != NULL, "invariant"); - - // Make sure that any previously aborted region has been - // cleared. - assert(_aborted_region.is_empty(), "aborted region not cleared"); - - // The iteration was actually aborted. So now _region_finger - // points to the address of the object we last scanned. 
If we - // leave it there, when we restart this task, we will rescan - // the object. It is easy to avoid this. We move the finger by - // enough to point to the next possible object header (the - // bitmap knows by how much we need to move it as it knows its - // granularity). - MemRegion newRegion = - MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end()); - - if (!newRegion.is_empty()) { - if (_cm->verbose_low()) { - gclog_or_tty->print_cr("[%d] recording unscanned region" - "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask", - _task_id, - newRegion.start(), newRegion.end()); - } - // Now record the part of the region we didn't scan to - // make sure this task scans it later. - _aborted_region = newRegion; - } - // break from while - mr = MemRegion(); - } - _region_finger = NULL; - } - - if (_cm->verbose_low()) { - gclog_or_tty->print_cr("[%d] drained region stack, size = %d", - _task_id, _cm->region_stack_size()); - } - } -} - void CMTask::print_stats() { gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d", _task_id, _calls); @@ -4795,8 +3970,7 @@ void CMTask::print_stats() { _global_pushes, _global_pops, _global_max_size); gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", _global_transfers_to,_global_transfers_from); - gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d", - _regions_claimed, _region_stack_pops); + gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", _steal_attempts, _steals); @@ -4855,15 +4029,7 @@ void CMTask::print_stats() { popping by other tasks. Only when there is no more work, tasks will totally drain the global mark stack. - (4) Global Region Stack. Entries on it correspond to areas of - the bitmap that need to be scanned since they contain gray - objects. Pushes on the region stack only happen during - evacuation pauses and typically correspond to areas covered by - GC LABS. If it overflows, then the marking phase should restart - and iterate over the bitmap to identify gray objects. Tasks will - try to totally drain the region stack as soon as possible. - - (5) SATB Buffer Queue. This is where completed SATB buffers are + (4) SATB Buffer Queue. This is where completed SATB buffers are made available. Buffers are regularly removed from this queue and scanned for roots, so that the queue doesn't get too long. During remark, all completed buffers are processed, as @@ -4875,12 +4041,12 @@ void CMTask::print_stats() { (1) When the marking phase has been aborted (after a Full GC). - (2) When a global overflow (either on the global stack or the - region stack) has been triggered. Before the task aborts, it - will actually sync up with the other tasks to ensure that all - the marking data structures (local queues, stacks, fingers etc.) - are re-initialised so that when do_marking_step() completes, - the marking phase can immediately restart. + (2) When a global overflow (on the global stack) has been + triggered. Before the task aborts, it will actually sync up with + the other tasks to ensure that all the marking data structures + (local queues, stacks, fingers etc.) are re-initialised so that + when do_marking_step() completes, the marking phase can + immediately restart. (3) When enough completed SATB buffers are available. 
The do_marking_step() method only tries to drain SATB buffers right @@ -4923,13 +4089,6 @@ void CMTask::do_marking_step(double time_target_ms, assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); assert(concurrent() == _cm->concurrent(), "they should be the same"); - assert(concurrent() || _cm->region_stack_empty(), - "the region stack should have been cleared before remark"); - assert(concurrent() || !_cm->has_aborted_regions(), - "aborted regions should have been cleared before remark"); - assert(_region_finger == NULL, - "this should be non-null only when a region is being scanned"); - G1CollectorPolicy* g1_policy = _g1h->g1_policy(); assert(_task_queues != NULL, "invariant"); assert(_task_queue != NULL, "invariant"); @@ -4978,10 +4137,10 @@ void CMTask::do_marking_step(double time_target_ms, set_cm_oop_closure(&cm_oop_closure); if (_cm->has_overflown()) { - // This can happen if the region stack or the mark stack overflows - // during a GC pause and this task, after a yield point, - // restarts. We have to abort as we need to get into the overflow - // protocol which happens right at the end of this task. + // This can happen if the mark stack overflows during a GC pause + // and this task, after a yield point, restarts. We have to abort + // as we need to get into the overflow protocol which happens + // right at the end of this task. set_has_aborted(); } @@ -4994,17 +4153,6 @@ void CMTask::do_marking_step(double time_target_ms, drain_local_queue(true); drain_global_stack(true); - // Then totally drain the region stack. We will not look at - // it again before the next invocation of this method. Entries on - // the region stack are only added during evacuation pauses, for - // which we have to yield. When we do, we abort the task anyway so - // it will look at the region stack again when it restarts. - bitmap_closure.set_scanning_heap_region(false); - drain_region_stack(&bitmap_closure); - // ...then partially drain the local queue and the global stack - drain_local_queue(true); - drain_global_stack(true); - do { if (!has_aborted() && _curr_region != NULL) { // This means that we're already holding on to a region. @@ -5034,9 +4182,7 @@ void CMTask::do_marking_step(double time_target_ms, // Let's iterate over the bitmap of the part of the // region that is left. - bitmap_closure.set_scanning_heap_region(true); - if (mr.is_empty() || - _nextMarkBitMap->iterate(&bitmap_closure, mr)) { + if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) { // We successfully completed iterating over the region. Now, // let's give up the region. giveup_current_region(); @@ -5061,9 +4207,9 @@ void CMTask::do_marking_step(double time_target_ms, HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger); // Check if bitmap iteration was aborted while scanning the last object if (new_finger >= _region_limit) { - giveup_current_region(); + giveup_current_region(); } else { - move_finger_to(new_finger); + move_finger_to(new_finger); } } } @@ -5119,9 +4265,7 @@ void CMTask::do_marking_step(double time_target_ms, if (!has_aborted()) { // We cannot check whether the global stack is empty, since other - // tasks might be pushing objects to it concurrently. We also cannot - // check if the region stack is empty because if a thread is aborting - // it can push a partially done region back. + // tasks might be pushing objects to it concurrently. 
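The surrounding comments describe the end-game of do_marking_step(): a task that has drained its local queue and can claim no region tries to steal from the other tasks' queues, and only joins the termination protocol once stealing fails. A rough sketch of that steal-then-terminate shape, with mutex-based stand-in queues rather than HotSpot's lock-free GenericTaskQueue, and with the terminator's re-check loop elided:

```cpp
#include <atomic>
#include <deque>
#include <mutex>
#include <vector>

// Simplified per-task work queue; HotSpot's real queues are lock-free.
struct MiniQueue {
  std::mutex _m;
  std::deque<int> _d;
  bool pop(int& out) {
    std::lock_guard<std::mutex> g(_m);
    if (_d.empty()) return false;
    out = _d.front(); _d.pop_front();
    return true;
  }
};

bool try_steal(std::vector<MiniQueue>& queues, size_t self, int& out) {
  for (size_t i = 0; i < queues.size(); i++) {
    if (i != self && queues[i].pop(out)) {
      return true;                 // stole work from queue i
    }
  }
  return false;                    // nothing visible anywhere we looked
}

void marking_loop(std::vector<MiniQueue>& queues, size_t self,
                  std::atomic<int>& terminated, void (*process)(int)) {
  int task;
  while (queues[self].pop(task) || try_steal(queues, self, task)) {
    process(task);
  }
  // All queues looked empty: offer termination. A real terminator re-checks
  // for late-arriving work and can back out, which is elided here.
  terminated.fetch_add(1);
}
```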
assert(_cm->out_of_regions(), "at this point we should be out of regions"); @@ -5145,9 +4289,7 @@ void CMTask::do_marking_step(double time_target_ms, // we could. Let's try to do some stealing... // We cannot check whether the global stack is empty, since other - // tasks might be pushing objects to it concurrently. We also cannot - // check if the region stack is empty because if a thread is aborting - // it can push a partially done region back. + // tasks might be pushing objects to it concurrently. assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here"); @@ -5194,9 +4336,7 @@ void CMTask::do_marking_step(double time_target_ms, // termination protocol. if (do_termination && !has_aborted()) { // We cannot check whether the global stack is empty, since other - // tasks might be concurrently pushing objects on it. We also cannot - // check if the region stack is empty because if a thread is aborting - // it can push a partially done region back. + // tasks might be concurrently pushing objects on it. // Separated the asserts so that we know which one fires. assert(_cm->out_of_regions(), "only way to reach here"); assert(_task_queue->size() == 0, "only way to reach here"); @@ -5233,13 +4373,10 @@ void CMTask::do_marking_step(double time_target_ms, // that, if a condition is false, we can immediately find out // which one. guarantee(_cm->out_of_regions(), "only way to reach here"); - guarantee(_aborted_region.is_empty(), "only way to reach here"); - guarantee(_cm->region_stack_empty(), "only way to reach here"); guarantee(_cm->mark_stack_empty(), "only way to reach here"); guarantee(_task_queue->size() == 0, "only way to reach here"); guarantee(!_cm->has_overflown(), "only way to reach here"); guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); - guarantee(!_cm->region_stack_overflow(), "only way to reach here"); if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id); @@ -5342,7 +4479,6 @@ CMTask::CMTask(int task_id, _task_queue(task_queue), _task_queues(task_queues), _cm_oop_closure(NULL), - _aborted_region(MemRegion()), _marked_bytes_array(marked_bytes), _card_bm(card_bm) { guarantee(task_queue != NULL, "invariant"); diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/src/share/vm/gc_implementation/g1/concurrentMark.hpp index e8795d6ac..deb471adb 100644 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -42,9 +42,7 @@ typedef GenericTaskQueueSet CMTaskQueueSet; class G1CMIsAliveClosure: public BoolObjectClosure { G1CollectedHeap* _g1; public: - G1CMIsAliveClosure(G1CollectedHeap* g1) : - _g1(g1) - {} + G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { } void do_object(oop obj) { ShouldNotCallThis(); @@ -111,11 +109,6 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC { return offsetToHeapWord(heapWordToOffset(addr) + 1); } - void mostly_disjoint_range_union(BitMap* from_bitmap, - size_t from_start_index, - HeapWord* to_start_word, - size_t word_num); - // debugging NOT_PRODUCT(bool covers(ReservedSpace rs) const;) }; @@ -258,60 +251,6 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC { void oops_do(OopClosure* f); }; -class CMRegionStack VALUE_OBJ_CLASS_SPEC { - MemRegion* _base; - jint _capacity; - jint _index; - jint _oops_do_bound; - bool _overflow; -public: - CMRegionStack(); - ~CMRegionStack(); - void allocate(size_t size); - - // This is lock-free; assumes that it will only be called in parallel - // with other "push" operations (no pops). 
- void push_lock_free(MemRegion mr);
-
- // Lock-free; assumes that it will only be called in parallel
- // with other "pop" operations (no pushes).
- MemRegion pop_lock_free();
-
-#if 0
- // The routines that manipulate the region stack with a lock are
- // not currently used. They should be retained, however, as a
- // diagnostic aid.
-
- // These two are the implementations that use a lock. They can be
- // called concurrently with each other but they should not be called
- // concurrently with the lock-free versions (push() / pop()).
- void push_with_lock(MemRegion mr);
- MemRegion pop_with_lock();
-#endif
-
- bool isEmpty() { return _index == 0; }
- bool isFull() { return _index == _capacity; }
-
- bool overflow() { return _overflow; }
- void clear_overflow() { _overflow = false; }
-
- int size() { return _index; }
-
- // It iterates over the entries in the region stack and it
- // invalidates (i.e. assigns MemRegion()) the ones that point to
- // regions in the collection set.
- bool invalidate_entries_into_cset();
-
- // This gives an upper bound up to which the iteration in
- // invalidate_entries_into_cset() will reach. This prevents
- // newly-added entries to be unnecessarily scanned.
- void set_oops_do_bound() {
- _oops_do_bound = _index;
- }
-
- void setEmpty() { _index = 0; clear_overflow(); }
-};
-
 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
 #ifndef PRODUCT
@@ -408,7 +347,6 @@ class ConcurrentMark : public CHeapObj {
 friend class ConcurrentMarkThread;
 friend class CMTask;
 friend class CMBitMapClosure;
- friend class CSetMarkOopClosure;
 friend class CMGlobalObjectClosure;
 friend class CMRemarkTask;
 friend class CMConcurrentMarkingTask;
@@ -443,7 +381,6 @@
protected:
 CMBitMap _markBitMap2;
 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
- bool _at_least_one_mark_complete;
 
 BitMap _region_bm;
 BitMap _card_bm;
@@ -457,7 +394,6 @@
 // For gray objects
 CMMarkStack _markStack; // Grey objects behind global finger.
- CMRegionStack _regionStack; // Grey regions behind global finger.
 HeapWord* volatile _finger; // the global finger, region aligned,
 // always points to the end of the
 // last claimed region
@@ -502,18 +438,6 @@
 // verbose level
 CMVerboseLevel _verbose_level;
 
- // These two fields are used to implement the optimisation that
- // avoids pushing objects on the global/region stack if there are
- // no collection set regions above the lowest finger.
-
- // This is the lowest finger (among the global and local fingers),
- // which is calculated before a new collection set is chosen.
- HeapWord* _min_finger;
- // If this flag is true, objects/regions that are marked below the
- // finger should be pushed on the stack(s). If this flag is
- // false, it is safe not to push them on the stack(s).
- bool _should_gray_objects;
-
 // All of these times are in ms. 
NumberSeq _init_times; NumberSeq _remark_times; @@ -604,7 +528,7 @@ protected: CMTaskQueueSet* task_queues() { return _task_queues; } // Access / manipulation of the overflow flag which is set to - // indicate that the global stack or region stack has overflown + // indicate that the global stack has overflown bool has_overflown() { return _has_overflown; } void set_has_overflown() { _has_overflown = true; } void clear_has_overflown() { _has_overflown = false; } @@ -684,68 +608,6 @@ public: bool mark_stack_overflow() { return _markStack.overflow(); } bool mark_stack_empty() { return _markStack.isEmpty(); } - // (Lock-free) Manipulation of the region stack - bool region_stack_push_lock_free(MemRegion mr) { - // Currently we only call the lock-free version during evacuation - // pauses. - assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); - - _regionStack.push_lock_free(mr); - if (_regionStack.overflow()) { - set_has_overflown(); - return false; - } - return true; - } - - // Lock-free version of region-stack pop. Should only be - // called in tandem with other lock-free pops. - MemRegion region_stack_pop_lock_free() { - return _regionStack.pop_lock_free(); - } - -#if 0 - // The routines that manipulate the region stack with a lock are - // not currently used. They should be retained, however, as a - // diagnostic aid. - - bool region_stack_push_with_lock(MemRegion mr) { - // Currently we only call the lock-based version during either - // concurrent marking or remark. - assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(), - "if we are at a safepoint it should be the remark safepoint"); - - _regionStack.push_with_lock(mr); - if (_regionStack.overflow()) { - set_has_overflown(); - return false; - } - return true; - } - - MemRegion region_stack_pop_with_lock() { - // Currently we only call the lock-based version during either - // concurrent marking or remark. - assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(), - "if we are at a safepoint it should be the remark safepoint"); - - return _regionStack.pop_with_lock(); - } -#endif - - int region_stack_size() { return _regionStack.size(); } - bool region_stack_overflow() { return _regionStack.overflow(); } - bool region_stack_empty() { return _regionStack.isEmpty(); } - - // Iterate over any regions that were aborted while draining the - // region stack (any such regions are saved in the corresponding - // CMTask) and invalidate (i.e. assign to the empty MemRegion()) - // any regions that point into the collection set. - bool invalidate_aborted_regions_in_cset(); - - // Returns true if there are any aborted memory regions. - bool has_aborted_regions(); - CMRootRegions* root_regions() { return &_root_regions; } bool concurrent_marking_in_progress() { @@ -774,10 +636,6 @@ public: return _task_queues->steal(task_num, hash_seed, obj); } - // It grays an object by first marking it. Then, if it's behind the - // global finger, it also pushes it on the global stack. - void deal_with_reference(oop obj); - ConcurrentMark(ReservedSpace rs, int max_regions); ~ConcurrentMark(); @@ -810,22 +668,6 @@ public: inline void grayRoot(oop obj, size_t word_size, uint worker_id, HeapRegion* hr = NULL); - // It's used during evacuation pauses to gray a region, if - // necessary, and it's MT-safe. It assumes that the caller has - // marked any objects on that region. 
If _should_gray_objects is
- // true and we're still doing concurrent marking, the region is
- // pushed on the region stack, if it is located below the global
- // finger, otherwise we do nothing.
- void grayRegionIfNecessary(MemRegion mr);
-
- // It's used during evacuation pauses to mark and, if necessary,
- // gray a single object and it's MT-safe. It assumes the caller did
- // not mark the object. If _should_gray_objects is true and we're
- // still doing concurrent marking, the object is pushed on the
- // global stack, if it is located below the global finger, otherwise
- // we do nothing.
- void markAndGrayObjectIfNecessary(oop p);
-
 // It iterates over the heap and for each object it comes across it
 // will dump the contents of its reference fields, as well as
 // liveness information for the object and its referents. The dump
@@ -869,10 +711,6 @@
public:
 // Do concurrent phase of marking, to a tentative transitive closure.
 void markFromRoots();
 
- // Process all unprocessed SATB buffers. It is called at the
- // beginning of an evacuation pause.
- void drainAllSATBBuffers();
-
 void checkpointRootsFinal(bool clear_all_soft_refs);
 void checkpointRootsFinalWork();
 void cleanup();
@@ -899,10 +737,6 @@
 _markStack.note_end_of_gc();
 }
 
- // Iterate over the oops in the mark stack and all local queues. It
- // also calls invalidate_entries_into_cset() on the region stack.
- void oops_do(OopClosure* f);
-
 // Verify that there are no CSet oops on the stacks (taskqueues /
 // global mark stack), enqueued SATB buffers, per-thread SATB
 // buffers, and fingers (global / per-task). The boolean parameters
@@ -919,40 +753,6 @@
 // unless the force parameter is true.
 void update_g1_committed(bool force = false);
 
- void complete_marking_in_collection_set();
-
- // It indicates that a new collection set is being chosen.
- void newCSet();
-
- // It registers a collection set heap region with CM. This is used
- // to determine whether any heap regions are located above the finger.
- void registerCSetRegion(HeapRegion* hr);
-
- // Resets the region fields of any active CMTask whose region fields
- // are in the collection set (i.e. the region currently claimed by
- // the CMTask will be evacuated and may be used, subsequently, as
- // an alloc region). When this happens the region fields in the CMTask
- // are stale and, hence, should be cleared causing the worker thread
- // to claim a new region.
- void reset_active_task_region_fields_in_cset();
-
- // Registers the maximum region-end associated with a set of
- // regions with CM. Again this is used to determine whether any
- // heap regions are located above the finger.
- void register_collection_set_finger(HeapWord* max_finger) {
- // max_finger is the highest heap region end of the regions currently
- // contained in the collection set. If this value is larger than
- // _min_finger then we need to gray objects.
- // This routine is like registerCSetRegion but for an entire
- // collection of regions.
- if (max_finger > _min_finger) {
- _should_gray_objects = true;
- }
- }
-
- // Returns "true" if at least one mark has been completed.
- bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
-
 bool isMarked(oop p) const {
 assert(p != NULL && p->is_oop(), "expected an oop");
 HeapWord* addr = (HeapWord*)p;
@@ -1164,23 +964,6 @@
private:
 // limit of the region this task is scanning, NULL if we're not scanning one
 HeapWord* _region_limit;
 
- // This is used only when we scan regions popped from the region
- // stack. 
It records what the last object on such a region we
- // scanned was. It is used to ensure that, if we abort region
- // iteration, we do not rescan the first part of the region. This
- // should be NULL when we're not scanning a region from the region
- // stack.
- HeapWord* _region_finger;
-
- // If we abort while scanning a region we record the remaining
- // unscanned portion and check this field when marking restarts.
- // This avoids having to push on the region stack while other
- // marking threads may still be popping regions.
- // If we were to push the unscanned portion directly to the
- // region stack then we would need to use locking versions
- // of the push and pop operations.
- MemRegion _aborted_region;
-
 // the number of words this task has scanned
 size_t _words_scanned;
 // When _words_scanned reaches this limit, the regular clock is
@@ -1268,8 +1051,6 @@
 int _global_transfers_to;
 int _global_transfers_from;
 
- int _region_stack_pops;
-
 int _regions_claimed;
 
 int _objs_found_on_bitmap;
@@ -1347,15 +1128,6 @@
public:
 bool has_timed_out() { return _has_timed_out; }
 bool claimed() { return _claimed; }
 
- // Support routines for the partially scanned region that may be
- // recorded as a result of aborting while draining the CMRegionStack
- MemRegion aborted_region() { return _aborted_region; }
- void set_aborted_region(MemRegion mr)
- { _aborted_region = mr; }
-
- // Clears any recorded partially scanned region
- void clear_aborted_region() { set_aborted_region(MemRegion()); }
-
 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 
 // It grays the object by marking it and, if necessary, pushing it
@@ -1385,22 +1157,12 @@
 // buffers are available.
 void drain_satb_buffers();
 
- // It keeps popping regions from the region stack and processing
- // them until the region stack is empty. 
- void drain_region_stack(BitMapClosure* closure);
-
 // moves the local finger to a new location
 inline void move_finger_to(HeapWord* new_finger) {
 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
 _finger = new_finger;
 }
 
- // moves the region finger to a new location
- inline void move_region_finger_to(HeapWord* new_finger) {
- assert(new_finger < _cm->finger(), "invariant");
- _region_finger = new_finger;
- }
-
 CMTask(int task_num, ConcurrentMark *cm,
 size_t* marked_bytes, BitMap* card_bm,
 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 58befab9a..ad13c52e3 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -1677,202 +1677,6 @@
protected:
 size_t _max_heap_capacity;
 };
 
-#define use_local_bitmaps 1
-#define verify_local_bitmaps 0
-#define oop_buffer_length 256
-
-#ifndef PRODUCT
-class GCLabBitMap;
-class GCLabBitMapClosure: public BitMapClosure {
-private:
- ConcurrentMark* _cm;
- GCLabBitMap* _bitmap;
-
-public:
- GCLabBitMapClosure(ConcurrentMark* cm,
- GCLabBitMap* bitmap) {
- _cm = cm;
- _bitmap = bitmap;
- }
-
- virtual bool do_bit(size_t offset);
-};
-#endif // !PRODUCT
-
-class GCLabBitMap: public BitMap {
-private:
- ConcurrentMark* _cm;
-
- int _shifter;
- size_t _bitmap_word_covers_words;
-
- // beginning of the heap
- HeapWord* _heap_start;
-
- // this is the actual start of the GCLab
- HeapWord* _real_start_word;
-
- // this is the actual end of the GCLab
- HeapWord* _real_end_word;
-
- // this is the first word, possibly located before the actual start
- // of the GCLab, that corresponds to the first bit of the bitmap
- HeapWord* _start_word;
-
- // size of a GCLab in words
- size_t _gclab_word_size;
-
- static int shifter() {
- return MinObjAlignment - 1;
- }
-
- // how many heap words does a single bitmap word correspond to?
- static size_t bitmap_word_covers_words() {
- return BitsPerWord << shifter();
- }
-
- size_t gclab_word_size() const {
- return _gclab_word_size;
- }
-
- // Calculates actual GCLab size in words
- size_t gclab_real_word_size() const {
- return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
- / BitsPerWord;
- }
-
- static size_t bitmap_size_in_bits(size_t gclab_word_size) {
- size_t bits_in_bitmap = gclab_word_size >> shifter();
- // We are going to ensure that the beginning of a word in this
- // bitmap also corresponds to the beginning of a word in the
- // global marking bitmap. To handle the case where a GCLab
- // starts from the middle of the bitmap, we need to add enough
- // space (i.e. up to a bitmap word) to ensure that we have
- // enough bits in the bitmap. 
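The sizing rule this comment describes, completed by the return statement that follows, deserves a worked example: one bitmap bit covers 2^shifter heap words, so a lab of gclab_word_size heap words needs gclab_word_size >> shifter bits, plus up to BitsPerWord - 1 slack bits so the lab's bitmap can stay word-aligned with the global marking bitmap. A small sketch with assumed constants (shifter == 0, i.e. one bit per heap word, 64-bit words, and a 4096-word lab):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t BitsPerWord = 64;       // 64-bit VM words (assumption)
  const size_t shifter = 0;            // one bitmap bit per heap word (assumption)
  const size_t gclab_word_size = 4096; // GCLab size in heap words (example)

  // bits_in_bitmap plus alignment slack, as in bitmap_size_in_bits():
  size_t bits = (gclab_word_size >> shifter) + BitsPerWord - 1;  // 4159 bits

  // bitmap_size_in_words() then rounds up to whole bitmap words:
  size_t words = (bits + BitsPerWord - 1) / BitsPerWord;         // 65 words

  std::printf("%zu bits -> %zu bitmap words\n", bits, words);
  return 0;
}
```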
- return bits_in_bitmap + BitsPerWord - 1; - } -public: - GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size) - : BitMap(bitmap_size_in_bits(gclab_word_size)), - _cm(G1CollectedHeap::heap()->concurrent_mark()), - _shifter(shifter()), - _bitmap_word_covers_words(bitmap_word_covers_words()), - _heap_start(heap_start), - _gclab_word_size(gclab_word_size), - _real_start_word(NULL), - _real_end_word(NULL), - _start_word(NULL) { - guarantee(false, "GCLabBitMap::GCLabBitmap(): don't call this any more"); - } - - inline unsigned heapWordToOffset(HeapWord* addr) { - unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; - assert(offset < size(), "offset should be within bounds"); - return offset; - } - - inline HeapWord* offsetToHeapWord(size_t offset) { - HeapWord* addr = _start_word + (offset << _shifter); - assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); - return addr; - } - - bool fields_well_formed() { - bool ret1 = (_real_start_word == NULL) && - (_real_end_word == NULL) && - (_start_word == NULL); - if (ret1) - return true; - - bool ret2 = _real_start_word >= _start_word && - _start_word < _real_end_word && - (_real_start_word + _gclab_word_size) == _real_end_word && - (_start_word + _gclab_word_size + _bitmap_word_covers_words) - > _real_end_word; - return ret2; - } - - inline bool mark(HeapWord* addr) { - guarantee(use_local_bitmaps, "invariant"); - assert(fields_well_formed(), "invariant"); - - if (addr >= _real_start_word && addr < _real_end_word) { - assert(!isMarked(addr), "should not have already been marked"); - - // first mark it on the bitmap - at_put(heapWordToOffset(addr), true); - - return true; - } else { - return false; - } - } - - inline bool isMarked(HeapWord* addr) { - guarantee(use_local_bitmaps, "invariant"); - assert(fields_well_formed(), "invariant"); - - return at(heapWordToOffset(addr)); - } - - void set_buffer(HeapWord* start) { - guarantee(false, "set_buffer(): don't call this any more"); - - guarantee(use_local_bitmaps, "invariant"); - clear(); - - assert(start != NULL, "invariant"); - _real_start_word = start; - _real_end_word = start + _gclab_word_size; - - size_t diff = - pointer_delta(start, _heap_start) % _bitmap_word_covers_words; - _start_word = start - diff; - - assert(fields_well_formed(), "invariant"); - } - -#ifndef PRODUCT - void verify() { - // verify that the marks have been propagated - GCLabBitMapClosure cl(_cm, this); - iterate(&cl); - } -#endif // PRODUCT - - void retire() { - guarantee(false, "retire(): don't call this any more"); - - guarantee(use_local_bitmaps, "invariant"); - assert(fields_well_formed(), "invariant"); - - if (_start_word != NULL) { - CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); - - // this means that the bitmap was set up for the GCLab - assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); - - mark_bitmap->mostly_disjoint_range_union(this, - 0, // always start from the start of the bitmap - _start_word, - gclab_real_word_size()); - _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); - -#ifndef PRODUCT - if (use_local_bitmaps && verify_local_bitmaps) - verify(); -#endif // PRODUCT - } else { - assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); - } - } - - size_t bitmap_size_in_words() const { - return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord; - } - -}; - class G1ParGCAllocBuffer: public ParGCAllocBuffer { private: bool _retired; diff --git 
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index dea81b974..8b4cc1360 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -140,7 +140,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _summary(new Summary()),
 
   _cur_clear_ct_time_ms(0.0),
-  _mark_closure_time_ms(0.0),
   _root_region_scan_wait_time_ms(0.0),
 
   _cur_ref_proc_time_ms(0.0),
@@ -944,9 +943,6 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
     _cur_aux_times_set[i] = false;
   }
 
-  // This is initialized to zero here and is set during
-  // the evacuation pause if marking is in progress.
-  _cur_satb_drain_time_ms = 0.0;
   // This is initialized to zero here and is set during the evacuation
   // pause if we actually waited for the root region scanning to finish.
   _root_region_scan_wait_time_ms = 0.0;
@@ -1246,11 +1242,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
 
   double other_time_ms = elapsed_ms;
 
-  // Subtract the SATB drain time. It's initialized to zero at the
-  // start of the pause and is updated during the pause if marking
-  // is in progress.
-  other_time_ms -= _cur_satb_drain_time_ms;
-
   // Subtract the root region scanning wait time. It's initialized to
   // zero at the start of the pause.
   other_time_ms -= _root_region_scan_wait_time_ms;
@@ -1268,11 +1259,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   // current value of "other time"
   other_time_ms -= _cur_clear_ct_time_ms;
 
-  // Subtract the time spent completing marking in the collection
-  // set. Note if marking is not in progress during the pause
-  // the value of _mark_closure_time_ms will be zero.
-  other_time_ms -= _mark_closure_time_ms;
-
   // TraceGen0Time and TraceGen1Time summary info updating.
   _all_pause_times_ms->add(elapsed_ms);
 
@@ -1283,16 +1269,8 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     MainBodySummary* body_summary = _summary->main_body_summary();
     assert(body_summary != NULL, "should not be null!");
 
-    // This will be non-zero iff marking is currently in progress (i.e.
-    // _g1->mark_in_progress() == true) and the currrent pause was not
-    // an initial mark pause. Since the body_summary items are NumberSeqs,
-    // however, they have to be consistent and updated in lock-step with
-    // each other. Therefore we unconditionally record the SATB drain
-    // time - even if it's zero.
-    body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
     body_summary->record_root_region_scan_wait_time_ms(
                                                _root_region_scan_wait_time_ms);
-
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
     body_summary->record_update_rs_time_ms(update_rs_time);
@@ -1308,7 +1286,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
       body_summary->record_parallel_other_time_ms(parallel_other_time);
     }
-    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
   }
 
   // We exempt parallel collection from this check because Alloc Buffer
@@ -1434,9 +1411,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
       print_stats(1, "Object Copying", obj_copy_time);
     }
     print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-    if (print_marking_info) {
-      print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
-    }
     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
 #ifndef PRODUCT
     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
@@ -1584,8 +1558,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   }
 
   double all_other_time_ms = pause_time_ms -
-    (update_rs_time + scan_rs_time + obj_copy_time +
-     _mark_closure_time_ms + termination_time);
+    (update_rs_time + scan_rs_time + obj_copy_time + termination_time);
 
   double young_other_time_ms = 0.0;
   if (young_cset_region_length() > 0) {
@@ -1712,41 +1685,6 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   dcqs.notify_if_necessary();
 }
 
-double
-G1CollectorPolicy::
-predict_young_collection_elapsed_time_ms(size_t adjustment) {
-  guarantee( adjustment == 0 || adjustment == 1, "invariant" );
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  size_t young_num = g1h->young_list()->length();
-  if (young_num == 0)
-    return 0.0;
-
-  young_num += adjustment;
-  size_t pending_cards = predict_pending_cards();
-  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
-                      predict_rs_length_diff();
-  size_t card_num;
-  if (gcs_are_young()) {
-    card_num = predict_young_card_num(rs_lengths);
-  } else {
-    card_num = predict_non_young_card_num(rs_lengths);
-  }
-  size_t young_byte_size = young_num * HeapRegion::GrainBytes;
-  double accum_yg_surv_rate =
-    _short_lived_surv_rate_group->accum_surv_rate(adjustment);
-
-  size_t bytes_to_copy =
-    (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
-
-  return
-    predict_rs_update_time_ms(pending_cards) +
-    predict_rs_scan_time_ms(card_num) +
-    predict_object_copy_time_ms(bytes_to_copy) +
-    predict_young_other_time_ms(young_num) +
-    predict_constant_other_time_ms();
-}
-
 double
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   size_t rs_length = predict_rs_length_diff();
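The prediction routine deleted above combined independent per-component cost models additively. A rough sketch of that shape, with hypothetical constant models standing in for the fitted ones the removed code used:

#include <cstddef>

// Hypothetical per-component models; the real ones are derived from
// observed pause data, not fixed coefficients like these.
double predict_rs_update_ms(size_t pending_cards) { return 0.001 * pending_cards; }
double predict_rs_scan_ms(size_t card_num)        { return 0.002 * card_num; }
double predict_copy_ms(size_t bytes_to_copy)      { return 0.000001 * bytes_to_copy; }

// Additive model: each phase is predicted independently and summed.
double predict_young_pause_ms(size_t pending_cards, size_t card_num,
                              size_t bytes_to_copy, double fixed_other_ms) {
  return predict_rs_update_ms(pending_cards) +
         predict_rs_scan_ms(card_num) +
         predict_copy_ms(bytes_to_copy) +
         fixed_other_ms;
}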
@@ -1980,7 +1918,6 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
           print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
         }
       }
-      print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
       print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
     }
     print_summary(1, "Other", summary->get_other_seq());
     {
@@ -1989,17 +1926,15 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
       if (parallel) {
         // parallel
         NumberSeq* other_parts[] = {
-          body_summary->get_satb_drain_seq(),
           body_summary->get_root_region_scan_wait_seq(),
           body_summary->get_parallel_seq(),
           body_summary->get_clear_ct_seq()
         };
         calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                        4, other_parts);
+                                        3, other_parts);
       } else {
         // serial
         NumberSeq* other_parts[] = {
-          body_summary->get_satb_drain_seq(),
          body_summary->get_root_region_scan_wait_seq(),
           body_summary->get_update_rs_seq(),
           body_summary->get_ext_root_scan_seq(),
@@ -2008,7 +1943,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
           body_summary->get_obj_copy_seq()
         };
         calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                        7, other_parts);
+                                        6, other_parts);
       }
       check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
     }
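The 4-to-3 and 7-to-6 constant changes above track the array sizes: the summary code reconstructs the "Other" sequence by subtracting the recorded component sequences from the total, so dropping the SATB drain sequence shrinks each array by one entry. A toy version of that subtraction, hedged: this NumberSeq stand-in works on plain averages, not HotSpot's actual NumberSeq class:

#include <cstdio>

// Toy stand-in for the NumberSeq(total, n, parts) constructor:
// "other" is whatever the total does not attribute to the parts.
double calc_other_ms(double total_avg, const double* part_avgs, int n) {
  double other = total_avg;
  for (int i = 0; i < n; ++i) {
    other -= part_avgs[i];
  }
  return other;
}

int main() {
  double total = 25.0;                 // hypothetical average pause (ms)
  double parts[] = { 0.4, 20.1, 1.2 }; // wait, parallel, clear CT (post-patch: 3 parts)
  printf("Other: %.1f ms\n", calc_other_ms(total, parts, 3)); // prints 3.3 ms
  return 0;
}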
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
index 6965f50df..1a51e4c75 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -64,7 +64,6 @@ public:
 };
 
 class MainBodySummary: public CHeapObj {
-  define_num_seq(satb_drain) // optional
   define_num_seq(root_region_scan_wait)
   define_num_seq(parallel) // parallel only
   define_num_seq(ext_root_scan)
@@ -74,7 +73,6 @@ class MainBodySummary: public CHeapObj {
   define_num_seq(obj_copy)
   define_num_seq(termination) // parallel only
   define_num_seq(parallel_other) // parallel only
-  define_num_seq(mark_closure)
   define_num_seq(clear_ct)
 };
 
@@ -182,7 +180,6 @@ private:
 
   double _cur_collection_code_root_fixup_time_ms;
 
-  double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
   double _cur_ref_proc_time_ms;
   double _cur_ref_enq_time_ms;
@@ -491,7 +488,6 @@ public:
     get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   }
 
-  double predict_young_collection_elapsed_time_ms(size_t adjustment);
   double predict_base_elapsed_time_ms(size_t pending_cards);
   double predict_base_elapsed_time_ms(size_t pending_cards,
                                       size_t scanned_cards);
@@ -712,7 +708,6 @@ private:
   double _cur_mark_stop_world_time_ms;
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
-  double _mark_closure_time_ms;
   double _root_region_scan_wait_time_ms;
 
   // Update the young list target length either by setting it to the
@@ -812,10 +807,6 @@ public:
 
   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
 
-  void record_mark_closure_time(double mark_closure_time_ms) {
-    _mark_closure_time_ms = mark_closure_time_ms;
-  }
-
   void record_root_region_scan_wait_time(double time_ms) {
     _root_region_scan_wait_time_ms = time_ms;
   }
@@ -849,11 +840,6 @@ public:
     _par_last_satb_filtering_times_ms[worker_i] = ms;
   }
 
-  void record_satb_drain_time(double ms) {
-    assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
-    _cur_satb_drain_time_ms = ms;
-  }
-
   void record_update_rs_time(int thread, double ms) {
     _par_last_update_rs_times_ms[thread] = ms;
   }
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 3de711c00..57e977e06 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -69,9 +69,6 @@
   diagnostic(bool, G1TraceConcRefinement, false,                \
           "Trace G1 concurrent refinement")                     \
                                                                 \
-  product(intx, G1MarkRegionStackSize, 1024 * 1024,             \
-          "Size of the region stack for concurrent marking.")   \
-                                                                \
   product(double, G1ConcMarkStepDurationMillis, 10.0,           \
           "Target duration of individual concurrent marking steps "  \
           "in milliseconds.")                                   \
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 76843a01f..2e0b75200 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -373,10 +373,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
     ScrubRemSetClaimValue      = 3,
     ParVerifyClaimValue        = 4,
     RebuildRSClaimValue        = 5,
-    CompleteMarkCSetClaimValue = 6,
-    ParEvacFailureClaimValue   = 7,
-    AggregateCountClaimValue   = 8,
-    VerifyCountClaimValue      = 9
+    ParEvacFailureClaimValue   = 6,
+    AggregateCountClaimValue   = 7,
+    VerifyCountClaimValue      = 8
   };
 
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
diff --git a/src/share/vm/utilities/bitMap.cpp b/src/share/vm/utilities/bitMap.cpp
index 17231d355..3141bc0de 100644
--- a/src/share/vm/utilities/bitMap.cpp
+++ b/src/share/vm/utilities/bitMap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,64 +179,6 @@ void BitMap::clear_large_range(idx_t beg, idx_t end) {
   clear_range_within_word(bit_index(end_full_word), end);
 }
 
-void BitMap::mostly_disjoint_range_union(BitMap* from_bitmap,
-                                         idx_t from_start_index,
-                                         idx_t to_start_index,
-                                         size_t word_num) {
-  // Ensure that the parameters are correct.
-  // These shouldn't be that expensive to check, hence I left them as
-  // guarantees.
-  guarantee(from_bitmap->bit_in_word(from_start_index) == 0,
-            "it should be aligned on a word boundary");
-  guarantee(bit_in_word(to_start_index) == 0,
-            "it should be aligned on a word boundary");
-  guarantee(word_num >= 2, "word_num should be at least 2");
-
-  intptr_t* from = (intptr_t*) from_bitmap->word_addr(from_start_index);
-  intptr_t* to   = (intptr_t*) word_addr(to_start_index);
-
-  if (*from != 0) {
-    // if it's 0, then there's no point in doing the CAS
-    while (true) {
-      intptr_t old_value = *to;
-      intptr_t new_value = old_value | *from;
-      intptr_t res = Atomic::cmpxchg_ptr(new_value, to, old_value);
-      if (res == old_value) break;
-    }
-  }
-  ++from;
-  ++to;
-
-  for (size_t i = 0; i < word_num - 2; ++i) {
-    if (*from != 0) {
-      // if it's 0, then there's no point in doing the CAS
-      assert(*to == 0, "nobody else should be writing here");
-      intptr_t new_value = *from;
-      *to = new_value;
-    }
-
-    ++from;
-    ++to;
-  }
-
-  if (*from != 0) {
-    // if it's 0, then there's no point in doing the CAS
-    while (true) {
-      intptr_t old_value = *to;
-      intptr_t new_value = old_value | *from;
-      intptr_t res = Atomic::cmpxchg_ptr(new_value, to, old_value);
-      if (res == old_value) break;
-    }
-  }
-
-  // the -1 is because we didn't advance them after the final CAS
-  assert(from ==
-         (intptr_t*) from_bitmap->word_addr(from_start_index) + word_num - 1,
-         "invariant");
-  assert(to == (intptr_t*) word_addr(to_start_index) + word_num - 1,
-         "invariant");
-}
-
 void BitMap::at_put(idx_t offset, bool value) {
   if (value) {
     set_bit(offset);
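The function removed above uses an endpoint-CAS pattern: only the first and last words of the target range can be contended, so they are merged with compare-and-swap, while the exclusively-owned middle words get plain stores. A stand-alone sketch of that pattern, substituting C++11 atomics for HotSpot's Atomic::cmpxchg_ptr (names and types here are illustrative, not the removed API):

#include <atomic>
#include <cstddef>
#include <cstdint>

void mostly_disjoint_union(const std::intptr_t* from,
                           std::atomic<std::intptr_t>* to,
                           size_t word_num) {
  // Caller's preconditions, as in the removed code: word_num >= 2 and
  // both ranges start on a bitmap-word boundary.
  // First word may be shared with a concurrent writer: merge via CAS.
  if (from[0] != 0) {
    std::intptr_t old_val = to[0].load();
    while (!to[0].compare_exchange_weak(old_val, old_val | from[0])) { }
  }
  // Middle words are owned exclusively by this thread and are still
  // zero in the target, so plain stores suffice.
  for (size_t i = 1; i + 1 < word_num; ++i) {
    if (from[i] != 0) {
      to[i].store(from[i], std::memory_order_relaxed);
    }
  }
  // Last word may be shared again: merge via CAS.
  if (from[word_num - 1] != 0) {
    std::intptr_t old_val = to[word_num - 1].load();
    while (!to[word_num - 1].compare_exchange_weak(
               old_val, old_val | from[word_num - 1])) { }
  }
}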
diff --git a/src/share/vm/utilities/bitMap.hpp b/src/share/vm/utilities/bitMap.hpp
index 757787711..2c0975f67 100644
--- a/src/share/vm/utilities/bitMap.hpp
+++ b/src/share/vm/utilities/bitMap.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -192,31 +192,6 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
   void par_set_range(idx_t beg, idx_t end, RangeSizeHint hint);
   void par_clear_range (idx_t beg, idx_t end, RangeSizeHint hint);
 
-  // It performs the union operation between subsets of equal length
-  // of two bitmaps (the target bitmap of the method and the
-  // from_bitmap) and stores the result to the target bitmap. The
-  // from_start_index represents the first bit index of the subrange
-  // of the from_bitmap. The to_start_index is the equivalent of the
-  // target bitmap. Both indexes should be word-aligned, i.e. they
-  // should correspond to the first bit on a bitmap word (it's up to
-  // the caller to ensure this; the method does check it). The length
-  // of the subset is specified with word_num and it is in number of
-  // bitmap words. The caller should ensure that this is at least 2
-  // (smaller ranges are not support to save extra checks). Again,
-  // this is checked in the method.
-  //
-  // Atomicity concerns: it is assumed that any contention on the
-  // target bitmap with other threads will happen on the first and
-  // last words; the ones in between will be "owned" exclusively by
-  // the calling thread and, in fact, they will already be 0. So, the
-  // method performs a CAS on the first word, copies the next
-  // word_num-2 words, and finally performs a CAS on the last word.
-  void mostly_disjoint_range_union(BitMap* from_bitmap,
-                                   idx_t from_start_index,
-                                   idx_t to_start_index,
-                                   size_t word_num);
-
-
   // Clearing
   void clear_large();
   inline void clear();
-- 
GitLab