diff --git a/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
index 2e223fcc11c16e54de0334292639391169af0d43..1eb913ba40482204aac512bf86ba387a1da6fee0 100644
--- a/src/share/vm/gc_implementation/g1/g1Allocator.cpp
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
@@ -59,7 +59,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
-    retained_region->record_top_and_timestamp();
+    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -94,6 +94,9 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+  if (_retained_old_gc_alloc_region != NULL) {
+    _retained_old_gc_alloc_region->record_retained_region();
+  }

   if (ResizePLAB) {
     _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index e3b2c268026858190159b3a69843cefb42107f17..fd381b2a263b5614587f53c368c8f9d4a447c79d 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -6808,7 +6808,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
     // We really only need to do this for old regions given that we
     // should never scan survivors. But it doesn't hurt to do it
     // for survivors too.
-    new_alloc_region->record_top_and_timestamp();
+    new_alloc_region->record_timestamp();
     if (survivor) {
       new_alloc_region->set_survivor();
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index b28fd2376dde1242f1c37cb8a593fd9e76cae973..eba50e6ac867660d57c77d7ccc28396b44da3b75 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -147,11 +147,9 @@ public:

     // Set the "from" region in the closure.
     _oc->set_region(r);
-    HeapWord* card_start = _bot_shared->address_for_index(index);
-    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
-    Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region = sp->used_region_at_save_marks();
-    MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
+    MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
+    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
+    MemRegion mr = pre_gc_allocated.intersection(card_region);
     if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
       // We make the card as "claimed" lazily (so races are possible
       // but they're benign), which reduces the number of duplicate
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index 3a7251b563965791a4b371c3f71021ff16a69cd9..2bac27936d18e3a3a416878ce503fe7c15ca720e 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -338,7 +338,7 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   _orig_end = mr.end();
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_top_and_timestamp();
+  record_timestamp();
 }

 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -426,9 +426,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,

   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
-  // parseable. Stop such at the "saved_mark" of the region.
+  // parseable. Stop such at the "scan_top" of the region.
   if (g1h->is_gc_active()) {
-    mr = mr.intersection(used_region_at_save_marks());
+    mr = mr.intersection(MemRegion(bottom(), scan_top()));
   } else {
     mr = mr.intersection(used_region());
   }
@@ -980,7 +980,7 @@ void HeapRegion::verify() const {

 void G1OffsetTableContigSpace::clear(bool mangle_space) {
   set_top(bottom());
-  set_saved_mark_word(bottom());
+  _scan_top = bottom();
   CompactibleSpace::clear(mangle_space);
   reset_bot();
 }
@@ -1012,41 +1012,42 @@ HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
   return _offsets.threshold();
 }

-HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
+HeapWord* G1OffsetTableContigSpace::scan_top() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
   HeapWord* local_top = top();
   OrderAccess::loadload();
-  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+  const unsigned local_time_stamp = _gc_time_stamp;
+  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
+  if (local_time_stamp < g1h->get_gc_time_stamp()) {
     return local_top;
   } else {
-    return Space::saved_mark_word();
+    return _scan_top;
   }
 }

-void G1OffsetTableContigSpace::record_top_and_timestamp() {
+void G1OffsetTableContigSpace::record_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

   if (_gc_time_stamp < curr_gc_time_stamp) {
-    // The order of these is important, as another thread might be
-    // about to start scanning this region. If it does so after
-    // set_saved_mark and before _gc_time_stamp = ..., then the latter
-    // will be false, and it will pick up top() as the high water mark
-    // of region. If it does so after _gc_time_stamp = ..., then it
-    // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behaviour will be correct.
-    Space::set_saved_mark_word(top());
-    OrderAccess::storestore();
+    // Setting the time stamp here tells concurrent readers to look at
+    // scan_top to know the maximum allowed address to look at.
+
+    // scan_top should be bottom for all regions except for the
+    // retained old alloc region which should have scan_top == top
+    HeapWord* st = _scan_top;
+    guarantee(st == _bottom || st == _top, "invariant");
     _gc_time_stamp = curr_gc_time_stamp;
-    // No need to do another barrier to flush the writes above. If
-    // this is called in parallel with other threads trying to
-    // allocate into the region, the caller should call this while
-    // holding a lock and when the lock is released the writes will be
-    // flushed.
   }
 }

+void G1OffsetTableContigSpace::record_retained_region() {
+  // scan_top is the maximum address where it's safe for the next gc to
+  // scan this region.
+  _scan_top = top();
+}
+
 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -1080,6 +1081,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
+  _scan_top = bottom();
+  set_saved_mark_word(NULL);
   reset_bot();
 }

diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 16c0d719ef3db579258096c6efde9320e9fe9b0d..10a211d909cbe3b6feb9b68489893969cc72dfa1 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -101,28 +101,25 @@ public:
 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
 // be reconciled, then G1OffsetTableContigSpace could go away.

-// The idea behind time stamps is the following. Doing a save_marks on
-// all regions at every GC pause is time consuming (if I remember
-// well, 10ms or so). So, we would like to do that only for regions
-// that are GC alloc regions. To achieve this, we use time
-// stamps. For every evacuation pause, G1CollectedHeap generates a
-// unique time stamp (essentially a counter that gets
-// incremented). Every time we want to call save_marks on a region,
-// we set the saved_mark_word to top and also copy the current GC
-// time stamp to the time stamp field of the space. Reading the
-// saved_mark_word involves checking the time stamp of the
-// region. If it is the same as the current GC time stamp, then we
-// can safely read the saved_mark_word field, as it is valid. If the
-// time stamp of the region is not the same as the current GC time
-// stamp, then we instead read top, as the saved_mark_word field is
-// invalid. Time stamps (on the regions and also on the
-// G1CollectedHeap) are reset at every cleanup (we iterate over
-// the regions anyway) and at the end of a Full GC. The current scheme
-// that uses sequential unsigned ints will fail only if we have 4b
+// The idea behind time stamps is the following. We want to keep track of
+// the highest address where it's safe to scan objects for each region.
+// This is only relevant for current GC alloc regions so we keep a time stamp
+// per region to determine if the region has been allocated during the current
+// GC or not. If the time stamp is current we report a scan_top value which
+// was saved at the end of the previous GC for retained alloc regions and which is
+// equal to the bottom for all other regions.
+// There is a race between card scanners and allocating gc workers where we must ensure
+// that card scanners do not read the memory allocated by the gc workers.
+// In order to enforce that, we must not return a value of _top which is more recent than the
+// time stamp. This is due to the fact that a region may become a gc alloc region at
+// some point after we've read the timestamp value as being < the current time stamp.
+// The time stamps are re-initialized to zero at cleanup and at Full GCs.
+// The current scheme that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.

 class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
   HeapWord* _top;
+  HeapWord* volatile _scan_top;
 protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -166,10 +163,11 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);

-  virtual HeapWord* saved_mark_word() const;
-  void record_top_and_timestamp();
+  HeapWord* scan_top() const;
+  void record_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+  void record_retained_region();

   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
@@ -193,6 +191,8 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);

+  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+
   // MarkSweep support phase3
   virtual HeapWord* initialize_threshold();
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
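
Reviewer note (not part of the patch): the sketch below is a minimal,
self-contained C++11 model of the handshake this change sets up between
card scanners (scan_top()) and the code that turns a region into a GC
alloc region (record_timestamp()). The names RegionModel, safe_scan_limit,
become_gc_alloc_region and retain_region are illustrative only, and C++11
atomics stand in for HotSpot's OrderAccess barriers; this is a model of
the protocol under those assumptions, not HotSpot code.

#include <atomic>

typedef char* HeapWordPtr;   // hypothetical stand-in for HotSpot's HeapWord*

// Global counter bumped once per evacuation pause, like the GC time stamp
// that G1CollectedHeap maintains.
std::atomic<unsigned> global_gc_time_stamp(1);

struct RegionModel {                       // hypothetical, not a HotSpot type
  HeapWordPtr bottom;
  std::atomic<HeapWordPtr> top;            // allocation high-water mark
  std::atomic<HeapWordPtr> scan_top;       // highest address safe to scan
  std::atomic<unsigned> gc_time_stamp;     // last pause this region allocated in
};

// Reader side, modeling G1OffsetTableContigSpace::scan_top(): top must be
// loaded *before* the region's time stamp (the loadload), so that a stale
// time stamp proves the loaded top predates any allocation in this pause.
HeapWordPtr safe_scan_limit(const RegionModel& r) {
  HeapWordPtr local_top = r.top.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire); // ~OrderAccess::loadload()
  unsigned ts = r.gc_time_stamp.load(std::memory_order_relaxed);
  if (ts < global_gc_time_stamp.load(std::memory_order_relaxed)) {
    return local_top;   // region has not been an alloc region in this pause
  }
  return r.scan_top.load(std::memory_order_relaxed); // clamp below fresh PLABs
}

// Writer side, modeling record_timestamp(): publishing the current time
// stamp redirects concurrent readers to scan_top, which was already set to
// bottom (clear) or to top (record_retained_region) before this pause began.
void become_gc_alloc_region(RegionModel& r) {
  r.gc_time_stamp.store(global_gc_time_stamp.load(std::memory_order_relaxed),
                        std::memory_order_release);
}

// Modeling record_retained_region(): runs at the end of a pause, when no
// card scanning races with it.
void retain_region(RegionModel& r) {
  r.scan_top.store(r.top.load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
}

The model also makes visible why the patch can drop the storestore barrier
that record_top_and_timestamp() needed: _scan_top is now only written while
no scanning is in flight (in clear() and in record_retained_region() at the
end of the previous pause), so record_timestamp() has nothing to order
besides the time stamp store itself.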