From 086cc9b4d0a8c53a4d43ec179ff5e97c315bf726 Mon Sep 17 00:00:00 2001
From: tonyp
Date: Fri, 29 Apr 2011 14:59:04 -0400
Subject: [PATCH] 7035144: G1: nightly failure: Non-dirty cards in region that
 should be dirty (failures still exist...)

Summary: We should only undirty cards after we decide that they are not on a
young region, not before. The fix also includes improvements to the
verify_dirty_region() method which print out which cards were not found dirty.
Reviewed-by: johnc, brutisso
---
 .../gc_implementation/g1/concurrentMark.cpp  |  2 +-
 .../gc_implementation/g1/g1CollectedHeap.cpp | 47 +++++++++------
 .../gc_implementation/g1/g1CollectedHeap.hpp |  2 +
 .../vm/gc_implementation/g1/g1RemSet.cpp     | 52 +++++++----------
 .../vm/gc_implementation/g1/heapRegion.cpp   | 30 +++++++++-
 .../vm/gc_implementation/g1/heapRegion.hpp   | 11 +++-
 src/share/vm/memory/cardTableModRefBS.cpp    | 58 +++++++++----------
 src/share/vm/memory/cardTableModRefBS.hpp    |  5 +-
 src/share/vm/memory/modRefBarrierSet.hpp     |  6 --
 9 files changed, 118 insertions(+), 95 deletions(-)

diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 72cdb2cee..14c855c1b 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -1899,7 +1899,7 @@ void ConcurrentMark::completeCleanup() {
   while (!_cleanup_list.is_empty()) {
     HeapRegion* hr = _cleanup_list.remove_head();
     assert(hr != NULL, "the list was not empty");
-    hr->rem_set()->clear();
+    hr->par_clear();
 
     tmp_free_list.add_as_tail(hr);
 
     // Instead of adding one region at a time to the secondary_free_list,
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 642c7acb7..25cc6a792 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4961,36 +4961,45 @@ public:
 
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) : _ct_bs(ct_bs) { }
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
-    MemRegion mr(r->bottom(), r->end());
     if (r->is_survivor()) {
-      _ct_bs->verify_dirty_region(mr);
+      _g1h->verify_dirty_region(r);
     } else {
-      _ct_bs->verify_clean_region(mr);
+      _g1h->verify_not_dirty_region(r);
     }
     return false;
   }
 };
 
+void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
+  // All of the region should be clean.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  MemRegion mr(hr->bottom(), hr->end());
+  ct_bs->verify_not_dirty_region(mr);
+}
+
+void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
+  // We cannot guarantee that [bottom(),end()] is dirty. Threads
+  // dirty allocated blocks as they allocate them. The thread that
+  // retires each region and replaces it with a new one will do a
+  // maximal allocation to fill in [pre_dummy_top(),end()] but will
+  // not dirty that area (one less thing to have to do while holding
+  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
+  // is dirty.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
+  ct_bs->verify_dirty_region(mr);
+}
+
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
-    // We cannot guarantee that [bottom(),end()] is dirty. Threads
-    // dirty allocated blocks as they allocate them. The thread that
-    // retires each region and replaces it with a new one will do a
-    // maximal allocation to fill in [pre_dummy_top(),end()] but will
-    // not dirty that area (one less thing to have to do while holding
-    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
-    // is dirty. Also note that verify_dirty_region() requires
-    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
-    // is not guaranteed to be.
-    MemRegion mr(hr->bottom(),
-                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
-    ct_bs->verify_dirty_region(mr);
+    verify_dirty_region(hr);
   }
 }
 
@@ -5033,7 +5042,7 @@ void G1CollectedHeap::cleanUpCardTable() {
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
+    G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
     heap_region_iterate(&cleanup_verifier);
   }
 #endif
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 7c318a577..f2936639c 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -970,6 +970,8 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
+  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index 7d8c604f2..6794174c1 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -157,7 +157,6 @@ public:
   void set_try_claimed() { _try_claimed = true; }
 
   void scanCard(size_t index, HeapRegion *r) {
-    _cards_done++;
     DirtyCardToOopClosure* cl =
       r->new_dcto_closure(_oc,
                           CardTableModRefBS::Precise,
@@ -168,17 +167,14 @@ public:
     HeapWord* card_start = _bot_shared->address_for_index(index);
     HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
     Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region;
-    if (ParallelGCThreads > 0) {
-      // first find the used area
-      sm_region = sp->used_region_at_save_marks();
-    } else {
-      // The closure is not idempotent. We shouldn't look at objects
-      // allocated during the GC.
-      sm_region = sp->used_region_at_save_marks();
-    }
+    MemRegion sm_region = sp->used_region_at_save_marks();
     MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
-    if (!mr.is_empty()) {
+    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
+      // We make the card as "claimed" lazily (so races are possible
+      // but they're benign), which reduces the number of duplicate
+      // scans (the rsets of the regions in the cset can intersect).
+      _ct_bs->set_card_claimed(index);
+      _cards_done++;
       cl->do_MemRegion(mr);
     }
   }
@@ -199,6 +195,9 @@ public:
     HeapRegionRemSet* hrrs = r->rem_set();
     if (hrrs->iter_is_complete()) return false; // All done.
     if (!_try_claimed && !hrrs->claim_iter()) return false;
+    // If we ever free the collection set concurrently, we should also
+    // clear the card table concurrently therefore we won't need to
+    // add regions of the collection set to the dirty cards region.
     _g1h->push_dirty_cards_region(r);
     // If we didn't return above, then
     //   _try_claimed || r->claim_iter()
@@ -230,15 +229,10 @@ public:
           _g1h->push_dirty_cards_region(card_region);
         }
 
-        // If the card is dirty, then we will scan it during updateRS.
-        if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
-          // We make the card as "claimed" lazily (so races are possible but they're benign),
-          // which reduces the number of duplicate scans (the rsets of the regions in the cset
-          // can intersect).
-          if (!_ct_bs->is_card_claimed(card_index)) {
-            _ct_bs->set_card_claimed(card_index);
-            scanCard(card_index, card_region);
-          }
+        // If the card is dirty, then we will scan it during updateRS.
+        if (!card_region->in_collection_set() &&
+            !_ct_bs->is_card_dirty(card_index)) {
+          scanCard(card_index, card_region);
         }
       }
       if (!_try_claimed) {
@@ -246,8 +240,6 @@ public:
     }
     return false;
   }
-  // Set all cards back to clean.
-  void cleanup() {_g1h->cleanUpCardTable();}
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
 };
@@ -566,8 +558,9 @@ public:
       update_rs_cl.set_region(r);
       HeapWord* stop_point =
         r->oops_on_card_seq_iterate_careful(scanRegion,
-                                            &filter_then_update_rs_cset_oop_cl,
-                                            false /* filter_young */);
+                                            &filter_then_update_rs_cset_oop_cl,
+                                            false /* filter_young */,
+                                            NULL /* card_ptr */);
 
       // Since this is performed in the event of an evacuation failure, we
       // we shouldn't see a non-null stop point
@@ -735,12 +728,6 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                 (OopClosure*)&mux :
                                 (OopClosure*)&update_rs_oop_cl));
 
-  // Undirty the card.
-  *card_ptr = CardTableModRefBS::clean_card_val();
-  // We must complete this write before we do any of the reads below.
-  OrderAccess::storeload();
-  // And process it, being careful of unallocated portions of TLAB's.
-
   // The region for the current card may be a young region. The
   // current card may have been a card that was evicted from the
   // card cache. When the card was inserted into the cache, we had
   // determined that its region was non-young. While in the cache,
   // the region may have been freed during a cleanup pause, reallocated
   // and tagged as young.
   //
   // We wish to filter out cards for such a region but the current
-  // thread, if we're running conucrrently, may "see" the young type
+  // thread, if we're running concurrently, may "see" the young type
   // change at any time (so an earlier "is_young" check may pass or
   // fail arbitrarily). We tell the iteration code to perform this
   // filtering when it has been determined that there has been an actual
   // allocation in this region and making it safe to check the young type.
   HeapWord* stop_point =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                         &filter_then_update_rs_oop_cl,
-                                        filter_young);
+                                        filter_young,
+                                        card_ptr);
 
   // If stop_point is non-null, then we encountered an unallocated region
   // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index 65d9b7c4d..b85c04e9b 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -376,6 +376,17 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
+void HeapRegion::par_clear() {
+  assert(used() == 0, "the region should have been already cleared");
+  assert(capacity() == (size_t) HeapRegion::GrainBytes,
+         "should be back to normal");
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->clear();
+  CardTableModRefBS* ct_bs =
+    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
+  ct_bs->clear(MemRegion(bottom(), end()));
+}
+
 //
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -600,7 +611,15 @@ HeapWord* HeapRegion::
 oops_on_card_seq_iterate_careful(MemRegion mr,
                                  FilterOutOfRegionClosure* cl,
-                                 bool filter_young) {
+                                 bool filter_young,
+                                 jbyte* card_ptr) {
+  // Currently, we should only have to clean the card if filter_young
+  // is true and vice versa.
+  if (filter_young) {
+    assert(card_ptr != NULL, "pre-condition");
+  } else {
+    assert(card_ptr == NULL, "pre-condition");
+  }
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -626,6 +645,15 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
 
   assert(!is_young(), "check value of filter_young");
 
+  // We can only clean the card here, after we make the decision that
+  // the card is not young. And we only clean the card if we have been
+  // asked to (i.e., card_ptr != NULL).
+  if (card_ptr != NULL) {
+    *card_ptr = CardTableModRefBS::clean_card_val();
+    // We must complete this write before we do any of the reads below.
+    OrderAccess::storeload();
+  }
+
   // We used to use "block_start_careful" here. But we're actually happy
   // to update the BOT while we do this...
   HeapWord* cur = block_start(mr.start());
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 6c307f942..03fa15d79 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -584,6 +584,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space);
+  void par_clear();
 
   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
@@ -802,12 +803,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
   HeapWord* object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
-  // In this version - if filter_young is true and the region
-  // is a young region then we skip the iteration.
+  // filter_young: if true and the region is a young region then we
+  // skip the iteration.
+  // card_ptr: if not NULL, and we decide that the card is not young
+  // and we iterate over it, we'll clean the card before we start the
+  // iteration.
   HeapWord* oops_on_card_seq_iterate_careful(MemRegion mr,
                                              FilterOutOfRegionClosure* cl,
-                                             bool filter_young);
+                                             bool filter_young,
+                                             jbyte* card_ptr);
 
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not object iteration, and may
diff --git a/src/share/vm/memory/cardTableModRefBS.cpp b/src/share/vm/memory/cardTableModRefBS.cpp
index abdd31bdb..356a77ade 100644
--- a/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/src/share/vm/memory/cardTableModRefBS.cpp
@@ -652,43 +652,37 @@ void CardTableModRefBS::verify() {
 }
 
 #ifndef PRODUCT
-class GuaranteeNotModClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-public:
-  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
-  void do_MemRegion(MemRegion mr) {
-    jbyte* entry = _ct->byte_for(mr.start());
-    guarantee(*entry != CardTableModRefBS::clean_card,
-              "Dirty card in region that should be clean");
+void CardTableModRefBS::verify_region(MemRegion mr,
+                                      jbyte val, bool val_equals) {
+  jbyte* start = byte_for(mr.start());
+  jbyte* end = byte_for(mr.last());
+  bool failures = false;
+  for (jbyte* curr = start; curr <= end; ++curr) {
+    jbyte curr_val = *curr;
+    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
+    if (failed) {
+      if (!failures) {
+        tty->cr();
+        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
+        tty->print_cr("== %sexpecting value: %d",
+                      (val_equals) ? "" : "not ", val);
+        failures = true;
+      }
+      tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
+                    "val: %d", curr, addr_for(curr),
+                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
+                    (int) curr_val);
+    }
   }
-};
-
-void CardTableModRefBS::verify_clean_region(MemRegion mr) {
-  GuaranteeNotModClosure blk(this);
-  non_clean_card_iterate_serial(mr, &blk);
+  guarantee(!failures, "there should not have been any failures");
 }
 
-// To verify a MemRegion is entirely dirty this closure is passed to
-// dirty_card_iterate. If the region is dirty do_MemRegion will be
-// invoked only once with a MemRegion equal to the one being
-// verified.
-class GuaranteeDirtyClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-  MemRegion _mr;
-  bool _result;
-public:
-  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
-    : _ct(ct), _mr(mr), _result(false) {}
-  void do_MemRegion(MemRegion mr) {
-    _result = _mr.equals(mr);
-  }
-  bool result() const { return _result; }
-};
+void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
+  verify_region(mr, dirty_card, false /* val_equals */);
+}
 
 void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
-  GuaranteeDirtyClosure blk(this, mr);
-  dirty_card_iterate(mr, &blk);
-  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
+  verify_region(mr, dirty_card, true /* val_equals */);
 }
 #endif
diff --git a/src/share/vm/memory/cardTableModRefBS.hpp b/src/share/vm/memory/cardTableModRefBS.hpp
index 770a8e578..a8d48515c 100644
--- a/src/share/vm/memory/cardTableModRefBS.hpp
+++ b/src/share/vm/memory/cardTableModRefBS.hpp
@@ -475,7 +475,10 @@ public:
   void verify();
   void verify_guard();
 
-  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
+  // val_equals -> it will check that all cards covered by mr equal val
+  // !val_equals -> it will check that all cards covered by mr do not equal val
+  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
+  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 
   static size_t par_chunk_heapword_alignment() {
diff --git a/src/share/vm/memory/modRefBarrierSet.hpp b/src/share/vm/memory/modRefBarrierSet.hpp
index ea3d2dc26..1378fbf4a 100644
--- a/src/share/vm/memory/modRefBarrierSet.hpp
+++ b/src/share/vm/memory/modRefBarrierSet.hpp
@@ -100,12 +100,6 @@ public:
   // Pass along the argument to the superclass.
   ModRefBarrierSet(int max_covered_regions) : BarrierSet(max_covered_regions) {}
-
-#ifndef PRODUCT
-  // Verifies that the given region contains no modified references.
-  virtual void verify_clean_region(MemRegion mr) = 0;
-#endif
-
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
-- 
GitLab
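
Editor's notes and illustrative sketches follow; they are not part of the patch.

The core of the fix (the g1RemSet.cpp and heapRegion.cpp hunks above) is an ordering change: the refinement path used to undirty a card first and only afterwards discover that its region had become young, which loses the dirty mark that the verifier expects young/survivor regions to keep. Below is a minimal standalone C++ sketch of the corrected ordering, not HotSpot code; Card, Region and refine_card are made-up names, and std::atomic_thread_fence stands in for OrderAccess::storeload().

// Standalone sketch (assumptions: toy Card/Region types, one refinement step).
#include <atomic>
#include <cstdio>

enum CardValue { kClean = 0, kDirty = 1 };

struct Region {
  std::atomic<bool> is_young{false};   // may flip concurrently with refinement
};

struct Card {
  std::atomic<int> value{kDirty};
  Region* region;
};

// Decide the card is worth scanning *before* undirtying it. If we cleaned the
// card first and only then noticed the region had become young, the card's
// "pending work" mark would be gone, exactly the non-dirty-card-in-a-region-
// that-should-be-dirty situation the verifier reports.
void refine_card(Card* card) {
  if (card->region->is_young.load(std::memory_order_acquire)) {
    return;  // young regions are handled at the next pause; leave the card dirty
  }
  // Only now undirty the card ...
  card->value.store(kClean, std::memory_order_relaxed);
  // ... and make that write visible before any of the reads the scan depends
  // on (the analogue of OrderAccess::storeload() in the patch).
  std::atomic_thread_fence(std::memory_order_seq_cst);
  std::printf("scanning card\n");  // stand-in for the actual oop iteration
}

int main() {
  Region r;
  Card c;
  c.region = &r;
  refine_card(&c);
  return 0;
}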
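The scanCard() hunk moves the "claim the card lazily" logic so that a card is claimed (and counted in _cards_done) only when it is actually about to be scanned. A standalone sketch of that idea follows, with hypothetical names and plain atomic flags instead of the G1 card table; the check and the set are deliberately not one atomic step, so two workers can occasionally both scan a card, a benign race that only costs duplicated work.

// Standalone sketch (assumptions: kNumCards, scan_card and worker are made up).
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

constexpr size_t kNumCards = 1024;

std::atomic<uint8_t> claimed[kNumCards];    // static storage: zero-initialized
std::atomic<size_t> cards_scanned{0};

void scan_card(size_t index) {
  (void)index;                              // a real scan would walk the card's oops
  cards_scanned.fetch_add(1, std::memory_order_relaxed);
}

void worker(const std::vector<size_t>& rset_cards) {
  for (size_t index : rset_cards) {
    if (claimed[index].load(std::memory_order_relaxed) != 0) {
      continue;                             // someone already scanned (or is scanning) it
    }
    claimed[index].store(1, std::memory_order_relaxed);  // lazy claim, no CAS needed
    scan_card(index);
  }
}

int main() {
  // Two remembered sets that overlap heavily, as rsets of cset regions can.
  std::vector<size_t> rs1, rs2;
  for (size_t i = 0; i < kNumCards; ++i)    rs1.push_back(i);
  for (size_t i = 0; i < kNumCards; i += 2) rs2.push_back(i);

  std::thread t1(worker, rs1);
  std::thread t2(worker, rs2);
  t1.join();
  t2.join();

  // Without claiming this would be 1536 scans; with it, close to 1024.
  std::printf("cards scanned: %zu\n", cards_scanned.load());
  return 0;
}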
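The new CardTableModRefBS::verify_region() reports every offending card instead of failing on the first mismatch, and verify_dirty_region() only checks [bottom(), pre_dummy_top()) because the retiring thread does not dirty the dummy tail of a region. The sketch below is a rough standalone model of that verification loop over a plain byte array; card_for and verify_cards are hypothetical helpers, not the HotSpot API.

// Standalone sketch (assumptions: 512-byte cards, toy CardTable type).
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::size_t kCardSize = 512;        // heap bytes covered per card
constexpr std::uint8_t kCleanCard = 0;
constexpr std::uint8_t kDirtyCard = 1;

struct CardTable {
  std::vector<std::uint8_t> bytes;
  explicit CardTable(std::size_t heap_bytes) : bytes(heap_bytes / kCardSize, kCleanCard) {}

  std::size_t card_for(std::size_t heap_offset) const { return heap_offset / kCardSize; }

  // val_equals == true  : every card covering [from, to) must equal val.
  // val_equals == false : no card covering [from, to) may equal val.
  // Prints every offending card and returns whether the check passed.
  bool verify_cards(std::size_t from, std::size_t to, std::uint8_t val, bool val_equals) const {
    bool failures = false;
    for (std::size_t i = card_for(from); i <= card_for(to - 1); ++i) {
      bool failed = val_equals ? (bytes[i] != val) : (bytes[i] == val);
      if (failed) {
        if (!failures) {
          std::printf("== CT verification failed, %sexpecting %d\n",
                      val_equals ? "" : "not ", (int) val);
          failures = true;
        }
        std::printf("== card %zu covers [%zu, %zu), val: %d\n",
                    i, i * kCardSize, (i + 1) * kCardSize, (int) bytes[i]);
      }
    }
    return !failures;
  }
};

int main() {
  CardTable ct(8 * kCardSize);                          // an 8-card "region"
  for (std::size_t i = 0; i < 4; ++i) ct.bytes[i] = kDirtyCard;
  ct.bytes[2] = kCleanCard;   // simulate the bug: one card lost its dirty mark
  // Like verify_dirty_region(): only [bottom, pre_dummy_top) must be all dirty.
  assert(!ct.verify_cards(0 * kCardSize, 4 * kCardSize, kDirtyCard, true  /* val_equals */));
  // Like verify_not_dirty_region(): the rest must contain no dirty cards.
  assert( ct.verify_cards(4 * kCardSize, 8 * kCardSize, kDirtyCard, false /* val_equals */));
  return 0;
}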
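Finally, hr->par_clear() replaces hr->rem_set()->clear() in completeCleanup() so that a concurrently freed region also gets its card-table bytes reset, which keeps the "non-young regions are clean" invariant that verify_not_dirty_region() now checks. A toy model of that step, assuming made-up RememberedSet, CardTable and HeapRegionModel types and an arbitrary region size:

// Standalone sketch (assumptions: 512-byte cards, 1 MB regions, toy types).
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <set>
#include <vector>

constexpr std::size_t kCardSize   = 512;
constexpr std::size_t kRegionSize = 1024 * 1024;   // hypothetical region size
constexpr std::uint8_t kCleanCard = 0;

struct RememberedSet {
  std::set<std::size_t> cards_with_refs_into_region;
  void clear() { cards_with_refs_into_region.clear(); }
};

struct CardTable {
  std::vector<std::uint8_t> bytes;
  // Reset every card byte covering the heap range [from, from + len).
  void clear_range(std::size_t from, std::size_t len) {
    std::memset(&bytes[from / kCardSize], kCleanCard, len / kCardSize);
  }
};

struct HeapRegionModel {
  std::size_t bottom;        // heap offset where the region starts
  RememberedSet rset;

  // Analogue of HeapRegion::par_clear(), called on an already-emptied region.
  void par_clear(CardTable* ct) {
    rset.clear();                            // the old code stopped here
    ct->clear_range(bottom, kRegionSize);    // the fix: also wipe the region's cards
  }
};

int main() {
  // A 16-region heap whose card table starts out entirely dirty.
  CardTable ct{std::vector<std::uint8_t>(16 * kRegionSize / kCardSize, 1)};
  HeapRegionModel hr{2 * kRegionSize, RememberedSet{}};
  hr.par_clear(&ct);   // region 2 now has clean cards and an empty remembered set
  return 0;
}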