Commit a9a0cccb authored by tonyp

6991377: G1: race between concurrent refinement and humongous object allocation

Summary: There is a race between the concurrent refinement threads and humongous object allocation: the refinement threads can corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to perform the humongous object allocation in careful steps, so that the concurrent refinement threads always see a consistent view of the BOT, the region contents, and top. The fix also includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
Parent f0680241
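The ordering the fix relies on can be modeled in isolation. The sketch below is not HotSpot code: the struct, field names, and fences are stand-ins (a C++ release fence modeling the storestore barrier), but it shows the protocol the patch establishes — zero the klass word and initialize the BOT *before* publishing top, so a concurrent refinement thread either sees an empty [bottom, top) interval or bails out on the zero klass word.

    #include <atomic>
    #include <cstddef>

    // Toy model of the publication protocol (hypothetical names throughout).
    struct Region {
      std::atomic<size_t> top{0};          // 0 == region still looks empty
      std::atomic<size_t> klass_word{0};   // 0 == "no parsable object here"
      size_t bot_entry = 0;                // stand-in for the BOT state
    };

    // Writer (the humongous allocator): set up metadata, then publish top.
    void publish_humongous(Region& r, size_t obj_words, size_t klass) {
      r.klass_word.store(0, std::memory_order_relaxed);     // zero header first
      r.bot_entry = 1;                                      // initialize the BOT
      std::atomic_thread_fence(std::memory_order_release);  // ~storestore
      r.top.store(obj_words, std::memory_order_relaxed);    // now visible
      r.klass_word.store(klass, std::memory_order_relaxed); // real header later
    }

    // Reader (a refinement thread): scan only if something parsable exists.
    bool try_scan(const Region& r) {
      if (r.top.load(std::memory_order_acquire) == 0)
        return false;                      // [bottom, top) is still empty
      if (r.klass_word.load(std::memory_order_relaxed) == 0)
        return false;                      // header not written yet: bail out
      return true;                         // BOT and header are safe to use
    }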
@@ -175,7 +175,7 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
   }
   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   assert(_array->offset_array(start_card-1) <= N_words,
-    "Offset card has an unexpected value");
+         "Offset card has an unexpected value");
   size_t start_card_for_region = start_card;
   u_char offset = max_jubyte;
   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
@@ -577,6 +577,16 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
 #endif
 }
 
+void
+G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
+  assert(_end == new_end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  _array->set_offset_array(_array->index_for(_bottom), 0);
+  // The rest should point to the first one.
+  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
@@ -626,3 +636,12 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
          "Precondition of call");
   _array->set_offset_array(bottom_index, 0);
 }
+
+void
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
+  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+
+  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
+  _next_offset_threshold = new_end;
+  _next_offset_index     = _array->index_for(new_end);
+}
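For context, set_for_starts_humongous writes BOT entries so that every card in the series points back to the single object at bottom. The toy below uses a simplified linear back-skip encoding instead of G1's power-of-4 N_powers scheme (the constants and names are invented), but it shows why a consistent BOT lets a reader find the block start from any card.

    #include <cstddef>
    #include <vector>

    // Toy block-offset table: entry[c] <= kMaxWords means "the block covering
    // card c starts entry[c] words before the card boundary"; larger values
    // mean "skip back (entry[c] - kMaxWords) cards and ask again". Real G1
    // uses a logarithmic power-of-4 encoding; this linear version only
    // illustrates the idea.
    constexpr size_t kCardWords = 64;   // words per card (stand-in value)
    constexpr size_t kMaxWords  = 64;   // largest direct back-offset

    struct ToyBOT {
      std::vector<size_t> entry;

      explicit ToyBOT(size_t cards) : entry(cards, 0) {}

      // Mark one object starting at word 0 and covering all cards, mirroring
      // set_for_starts_humongous: card 0 gets offset 0 and the remaining
      // cards point back toward it.
      void set_single_block() {
        entry[0] = 0;
        for (size_t c = 1; c < entry.size(); ++c) {
          entry[c] = kMaxWords + c;   // "c cards back" (linear, not power-of-4)
        }
      }

      // Find the word index where the block covering card c starts.
      size_t block_start(size_t c) const {
        while (entry[c] > kMaxWords) {
          c -= entry[c] - kMaxWords;  // follow the back-skip
        }
        return c * kCardWords - entry[c];
      }
    };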
@@ -436,6 +436,8 @@ public:
   }
 
   void check_all_cards(size_t left_card, size_t right_card) const;
 
+  virtual void set_for_starts_humongous(HeapWord* new_end);
+
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -484,4 +486,6 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
 
+  virtual void set_for_starts_humongous(HeapWord* new_end);
+
 };
@@ -377,10 +377,26 @@ void HeapRegion::calc_gc_efficiency() {
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@@ -500,23 +516,6 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
   return blk.result();
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
-  // The order is important here.
-  start->add_continuingHumongousRegion(this);
-  _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
-}
-
-void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
-  // Must join the blocks of the current H region seq with the block of the
-  // added region.
-  offsets()->join_blocks(bottom(), cont->bottom());
-  arrayOop obj = (arrayOop)(bottom());
-  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
-  set_end(cont->end());
-  set_top(cont->end());
-}
-
 void HeapRegion::save_marks() {
   set_saved_mark();
 }
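A minimal sketch of the new precondition discipline in set_startsHumongous / set_continuesHumongous follows (hypothetical stand-in types; the real HeapRegion and BOT classes carry far more state): a region must still look untouched (end == _orig_end, top == bottom) when it is typed, and the "starts humongous" region grows its end before the BOT is initialized over the new range, so the BOT never describes memory outside the region.

    #include <cassert>
    #include <cstddef>

    struct ToyRegion {
      size_t bottom_ = 0, top_ = 0, end_ = 0, orig_end_ = 0;
      bool starts_humongous = false, continues_humongous = false;
      ToyRegion* start_region = nullptr;

      void set_starts_humongous(size_t new_end) {
        assert(end_ == orig_end_ && top_ == bottom_);  // pristine region
        starts_humongous = true;
        start_region = this;
        end_ = new_end;   // grow the region first...
        // ...then initialize the BOT over [bottom_, new_end) (elided here),
        // so the BOT never covers memory outside the region.
      }

      void set_continues_humongous(ToyRegion* start) {
        assert(end_ == orig_end_ && top_ == bottom_);  // pristine region
        assert(start->starts_humongous);               // pre-condition
        continues_humongous = true;
        start_region = start;
      }
    };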
@@ -395,14 +395,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   // Causes the current region to represent a humongous object spanning "n"
   // regions.
-  virtual void set_startsHumongous();
+  void set_startsHumongous(HeapWord* new_end);
 
   // The regions that continue a humongous sequence should be added using
   // this method, in increasing address order.
   void set_continuesHumongous(HeapRegion* start);
 
-  void add_continuingHumongousRegion(HeapRegion* cont);
-
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@@ -733,13 +731,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
                            FilterOutOfRegionClosure* cl,
                            bool filter_young);
 
-  // The region "mr" is entirely in "this", and starts and ends at block
-  // boundaries. The caller declares that all the contained blocks are
-  // coalesced into one.
-  void declare_filled_region_to_BOT(MemRegion mr) {
-    _offsets.single_block(mr.start(), mr.end());
-  }
-
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does no object iteration, and may
   // therefore be used safely when the heap is unparseable.
@@ -91,34 +91,118 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
   }
   if (sumSizes >= word_size) {
     _alloc_search_start = cur;
-    // Mark the allocated regions as allocated.
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // Currently, allocs_are_zero_filled() returns false. The zero
+    // filling infrastructure will be going away soon (see CR 6977804).
+    // So no need to do anything else here.
     bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+    assert(!zf, "not supported");
+
+    // This will be the "starts humongous" region.
     HeapRegion* first_hr = _regions.at(first);
-    for (int i = first; i < cur; i++) {
-      HeapRegion* hr = _regions.at(i);
-      if (zf)
-        hr->ensure_zero_filled();
+    {
+      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+      first_hr->set_zero_fill_allocated();
+    }
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    // (Note: sumSizes = "region size" x "number of regions we found").
+    HeapWord* new_end = new_obj + sumSizes;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
       {
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         hr->set_zero_fill_allocated();
       }
-      size_t sz = hr->capacity() / HeapWordSize;
-      HeapWord* tmp = hr->allocate(sz);
-      assert(tmp != NULL, "Humongous allocation failure");
-      MemRegion mr = MemRegion(tmp, sz);
-      CollectedHeap::fill_with_object(mr);
-      hr->declare_filled_region_to_BOT(mr);
-      if (i == first) {
-        first_hr->set_startsHumongous();
-      } else {
-        assert(i > first, "sanity");
-        hr->set_continuesHumongous(first_hr);
-      }
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We could extend is_empty() to also check
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here.
+    hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      if ((i + 1) == cur) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
+      } else {
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
+      }
     }
-    HeapWord* first_hr_bot = first_hr->bottom();
-    HeapWord* obj_end = first_hr_bot + word_size;
-    first_hr->set_top(obj_end);
-    return first_hr_bot;
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    return new_obj;
   } else {
     // If we started from the beginning, we want to know why we can't alloc.
     return NULL;
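The reader-side reasoning the comments above appeal to can be sketched as follows (hypothetical, simplified signature): a refinement thread clips its card to the [bottom, top) interval, so before top is published the intersection is empty, and after top is published (behind the storestore) a still-zero klass word makes it bail out.

    #include <cstddef>

    // Reader-side sketch, word indices instead of HeapWord* for simplicity.
    bool refine_card(size_t bottom, size_t top, size_t card_start,
                     size_t card_end, size_t klass_word) {
      size_t lo = card_start > bottom ? card_start : bottom;
      size_t hi = card_end < top ? card_end : top;
      if (lo >= hi)        return false;  // empty intersection: nothing to scan
      if (klass_word == 0) return false;  // object header not published: bail
      // ... would now walk objects on the card via the initialized BOT ...
      return true;
    }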
@@ -308,7 +308,7 @@ void RSHashTable::add_entry(SparsePRTEntry* e) {
   assert(e2->num_valid_cards() > 0, "Postcondition.");
 }
 
-CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+CardIdx_t RSHashTableIter::find_first_card_in_list() {
   CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
@@ -322,11 +322,11 @@ CardIdx_t RSHashTableIter::find_first_card_in_list() {
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
+size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
 }
 
-bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
+bool RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::cards_num() &&
@@ -282,8 +282,6 @@ public:
 class SparsePRTIter: public RSHashTableIter {
 public:
-  SparsePRTIter() : RSHashTableIter() { }
-
   void init(const SparsePRT* sprt) {
     RSHashTableIter::init(sprt->cur());
   }
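The sparsePRT change is pure tidying: the commented-out /* RSHashTable:: */ scoping and the redundant SparsePRTIter default constructor are dropped, with no behavior change. Below is a stubbed sketch of the iterator pattern the class keeps; the real RSHashTableIter/SparsePRTIter walk a hash table of per-region card lists, so this stand-in only mirrors the has_next(out-param) calling convention.

    #include <cstddef>
    #include <utility>
    #include <vector>

    class StubIter {
      std::vector<size_t> cards_;
      size_t pos_ = 0;
    public:
      explicit StubIter(std::vector<size_t> cards) : cards_(std::move(cards)) {}
      // Advances and reports the next card index, like
      // RSHashTableIter::has_next(size_t& card_index).
      bool has_next(size_t& card_index) {
        if (pos_ >= cards_.size()) return false;
        card_index = cards_[pos_++];
        return true;
      }
    };

    void process_all(StubIter& iter) {
      size_t card_index;
      while (iter.has_next(card_index)) {
        // ... refine / process the card ...
      }
    }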