Commit d5318930 authored by tonyp

6722565: G1: assert !r->is_on_unclean_list() fires

Summary: Under certain circumstances, two cleanup threads can claim and process the same region.
Reviewed-by: apetrusenko, ysr
Parent f632dbb2
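The fix hinges on workers claiming each region atomically before processing it. claimHeapRegion() itself is not part of this diff; in HotSpot it is essentially a single compare-and-swap on the region's _claimed field. Below is a minimal standalone sketch of such a claim primitive, using std::atomic in place of HotSpot's Atomic::cmpxchg (the Region type and names are illustrative, not HotSpot's):

#include <atomic>

// Illustrative claim primitive, not HotSpot code: a single CAS on the
// claim field decides which worker gets to process a region.
struct Region {
  std::atomic<int> claimed{0};   // 0 plays the role of InitialClaimValue

  bool claim(int claim_value) {
    int cur = claimed.load();
    if (cur == claim_value) {
      return false;              // another worker already claimed it
    }
    // At most one worker's CAS can move the field to claim_value.
    return claimed.compare_exchange_strong(cur, claim_value);
  }
};

int main() {
  Region r;
  bool first  = r.claim(1);      // succeeds
  bool second = r.claim(1);      // fails: value 1 is already set
  return (first && !second) ? 0 : 1;
}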
@@ -1423,7 +1423,8 @@ public:
                                            NULL /* CO tracker */);
     calccl.no_yield();
     if (ParallelGCThreads > 0) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, i, 1);
+      _g1h->heap_region_par_iterate_chunked(&calccl, i,
+                                            HeapRegion::FinalCountClaimValue);
     } else {
       _g1h->heap_region_iterate(&calccl);
     }
@@ -1502,7 +1503,8 @@ public:
                                          &_par_cleanup_thread_state[i]->list,
                                          i);
     if (ParallelGCThreads > 0) {
-      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, 2);
+      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
+                                            HeapRegion::NoteEndClaimValue);
     } else {
       _g1h->heap_region_iterate(&g1_note_end);
     }
@@ -1545,7 +1547,8 @@ public:
   void work(int i) {
     if (ParallelGCThreads > 0) {
-      _g1rs->scrub_par(_region_bm, _card_bm, i, 3);
+      _g1rs->scrub_par(_region_bm, _card_bm, i,
+                       HeapRegion::ScrubRemSetClaimValue);
     } else {
       _g1rs->scrub(_region_bm, _card_bm);
     }
@@ -1610,10 +1613,18 @@ void ConcurrentMark::cleanup() {
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
                                         &_region_bm, &_card_bm);
   if (ParallelGCThreads > 0) {
+    assert(g1h->check_heap_region_claim_values(
+           HeapRegion::InitialClaimValue),
+           "sanity check");
     int n_workers = g1h->workers()->total_workers();
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&g1_par_count_task);
     g1h->set_par_threads(0);
+    assert(g1h->check_heap_region_claim_values(
+           HeapRegion::FinalCountClaimValue),
+           "sanity check");
   } else {
     g1_par_count_task.work(0);
   }
@@ -1654,6 +1665,9 @@ void ConcurrentMark::cleanup() {
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&g1_par_note_end_task);
     g1h->set_par_threads(0);
+    assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
+           "sanity check");
   } else {
     g1_par_note_end_task.work(0);
   }
@@ -1665,7 +1679,7 @@ void ConcurrentMark::cleanup() {
                            (note_end_end - note_end_start)*1000.0);
  }

  // Now we "scrub" remembered sets. Note that we must do this before the
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
@@ -1676,6 +1690,10 @@ void ConcurrentMark::cleanup() {
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&g1_par_scrub_rs_task);
     g1h->set_par_threads(0);
+    assert(g1h->check_heap_region_claim_values(
+           HeapRegion::ScrubRemSetClaimValue),
+           "sanity check");
   } else {
     g1_par_scrub_rs_task.work(0);
   }
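The three parallel cleanup phases above run back-to-back over the same region array, each under its own claim value (the constants are introduced later in this change, in heapRegion.hpp). A phase only skips regions that already carry its own value, so the value left behind by phase N never blocks phase N+1; the new asserts then verify that every region finished the phase with the expected value, i.e. was claimed exactly once. Schematically (an illustrative restatement, not a verbatim excerpt):

// A region's claim field advances through these values during a
// cleanup() round; each parallel phase moves every region from the
// previous phase's value to its own, exactly once.
enum ClaimValues {
  InitialClaimValue     = 0,   // state before the final-count phase
  FinalCountClaimValue  = 1,   // after the parallel final-count phase
  NoteEndClaimValue     = 2,   // after the parallel note-end phase
  ScrubRemSetClaimValue = 3    // after parallel remembered-set scrubbing
};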
@@ -1715,38 +1715,129 @@ G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {

HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }

-const int OverpartitionFactor = 4;
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 int worker,
                                                 jint claim_value) {
-  // We break up the heap regions into blocks of size ParallelGCThreads (to
-  // decrease iteration costs).
-  const size_t nregions = n_regions();
-  const size_t n_thrds = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
-  const size_t partitions = n_thrds * OverpartitionFactor;
-  const size_t BlkSize = MAX2(nregions/partitions, (size_t)1);
-  const size_t n_blocks = (nregions + BlkSize - 1)/BlkSize;
-  assert(ParallelGCThreads > 0 || worker == 0, "Precondition");
-  const int init_idx = (int) (n_blocks/n_thrds * worker);
-  for (size_t blk = 0; blk < n_blocks; blk++) {
-    size_t idx = init_idx + blk;
-    if (idx >= n_blocks) idx = idx - n_blocks;
-    size_t reg_idx = idx * BlkSize;
-    assert(reg_idx < nregions, "Because we rounded blk up.");
-    HeapRegion* r = region_at(reg_idx);
+  const size_t regions = n_regions();
+  const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
+  // try to spread out the starting points of the workers
+  const size_t start_index = regions / worker_num * (size_t) worker;
+
+  // each worker will actually look at all regions
+  for (size_t count = 0; count < regions; ++count) {
+    const size_t index = (start_index + count) % regions;
+    assert(0 <= index && index < regions, "sanity");
+
+    HeapRegion* r = region_at(index);
+    // we'll ignore "continues humongous" regions (we'll process them
+    // when we come across their corresponding "start humongous"
+    // region) and regions already claimed
+    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+      continue;
+    }
+    // OK, try to claim it
    if (r->claimHeapRegion(claim_value)) {
-      for (size_t j = 0; j < BlkSize; j++) {
-        size_t reg_idx2 = reg_idx + j;
-        if (reg_idx2 == nregions) break;
-        HeapRegion* r2 = region_at(reg_idx2);
-        if (j > 0) r2->set_claim_value(claim_value);
-        bool res = cl->doHeapRegion(r2);
-        guarantee(!res, "Should not abort.");
+      // success!
+      assert(!r->continuesHumongous(), "sanity");
+      if (r->startsHumongous()) {
+        // If the region is "starts humongous" we'll iterate over its
+        // "continues humongous" regions before the region itself. The
+        // order is important: in one case, calling the closure on the
+        // "starts humongous" region might de-allocate and clear all its
+        // "continues humongous" regions and, as a result, we might end
+        // up processing them twice. So, we'll do them first (notice:
+        // most closures will ignore them anyway) and then we'll do the
+        // "starts humongous" region.
+        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
+          HeapRegion* chr = region_at(ch_index);
+          // if the region has already been claimed or it's not
+          // "continues humongous" we're done
+          if (chr->claim_value() == claim_value ||
+              !chr->continuesHumongous()) {
+            break;
+          }
+          // No one should have claimed it directly, given that we
+          // claimed its "starts humongous" region.
+          assert(chr->claim_value() != claim_value, "sanity");
+          assert(chr->humongous_start_region() == r, "sanity");
+          if (chr->claimHeapRegion(claim_value)) {
+            // we should always be able to claim it; no one else should
+            // be trying to claim this region
+            bool res2 = cl->doHeapRegion(chr);
+            assert(!res2, "Should not abort");
+            // Right now, this holds (i.e., no closure that actually
+            // does something with "continues humongous" regions
+            // clears them). We might have to weaken it in the future,
+            // but let's leave these two asserts here for extra safety.
+            assert(chr->continuesHumongous(), "should still be the case");
+            assert(chr->humongous_start_region() == r, "sanity");
+          } else {
+            guarantee(false, "we should not reach here");
+          }
+        }
+      }
+
+      assert(!r->continuesHumongous(), "sanity");
+      bool res = cl->doHeapRegion(r);
+      assert(!res, "Should not abort");
    }
  }
}
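The properties worth noting in the rewritten iterator: every worker scans the entire region array (so nothing is missed regardless of the worker count), the CAS inside claimHeapRegion() ensures each region is processed at most once, and the spread-out starting points keep workers from racing on the same regions. A self-contained model of just that scheme, using std::atomic and std::thread in place of HotSpot's primitives and omitting the humongous-region special case:

#include <atomic>
#include <cassert>
#include <cstdio>
#include <thread>
#include <vector>

// Models the new iteration scheme: every worker walks the whole region
// array from a spread-out starting point; the CAS guarantees that each
// region is claimed, and therefore processed, exactly once in total.
int main() {
  const size_t regions = 1000, workers = 4;
  std::vector<std::atomic<int>> claimed(regions);  // all zero-initialized
  std::atomic<size_t> processed{0};
  const int claim_value = 1;                       // e.g. FinalCountClaimValue

  std::vector<std::thread> pool;
  for (size_t w = 0; w < workers; ++w) {
    pool.emplace_back([&, w] {
      const size_t start = regions / workers * w;  // spread starting points
      for (size_t count = 0; count < regions; ++count) {
        const size_t index = (start + count) % regions;
        int expected = 0;                          // InitialClaimValue
        if (claimed[index].compare_exchange_strong(expected, claim_value)) {
          ++processed;                             // stands in for doHeapRegion()
        }
      }
    });
  }
  for (std::thread& t : pool) {
    t.join();
  }
  assert(processed.load() == regions);             // no region missed or repeated
  std::printf("processed %zu of %zu regions\n", processed.load(), regions);
  return 0;
}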
+#ifdef ASSERT
+// This checks whether all regions in the heap have the correct claim
+// value. I also piggy-backed on this a check to ensure that the
+// humongous_start_region() information on "continues humongous"
+// regions is correct.
+
+class CheckClaimValuesClosure : public HeapRegionClosure {
+private:
+  jint _claim_value;
+  size_t _failures;
+  HeapRegion* _sh_region;
+public:
+  CheckClaimValuesClosure(jint claim_value) :
+    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
+  bool doHeapRegion(HeapRegion* r) {
+    if (r->claim_value() != _claim_value) {
+      gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
+                             "claim value = %d, should be %d",
+                             r->bottom(), r->end(), r->claim_value(),
+                             _claim_value);
+      ++_failures;
+    }
+    if (!r->isHumongous()) {
+      _sh_region = NULL;
+    } else if (r->startsHumongous()) {
+      _sh_region = r;
+    } else if (r->continuesHumongous()) {
+      if (r->humongous_start_region() != _sh_region) {
+        gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
+                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
+                               r->bottom(), r->end(),
+                               r->humongous_start_region(),
+                               _sh_region);
+        ++_failures;
+      }
+    }
+    return false;
+  }
+  size_t failures() {
+    return _failures;
+  }
+};
+
+bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
+  CheckClaimValuesClosure cl(claim_value);
+  heap_region_iterate(&cl);
+  return cl.failures() == 0;
+}
+#endif // ASSERT
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  HeapRegion* r = g1_policy()->collection_set();
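CheckClaimValuesClosure above depends on heap_region_iterate() visiting regions in address order: _sh_region always holds the most recently seen "starts humongous" region, so every "continues humongous" region that follows must point back at it. A toy model of that bookkeeping, with hypothetical types rather than HotSpot's:

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of the _sh_region bookkeeping: visiting regions in address
// order, each ContinuesHumongous region must point back at the most
// recent StartsHumongous region.
enum HumongousType { NotHumongous, StartsHumongous, ContinuesHumongous };
struct Region { HumongousType type; const Region* start; };

std::size_t count_bad_humongous_links(const std::vector<Region>& heap) {
  std::size_t failures = 0;
  const Region* sh_region = nullptr;        // plays the role of _sh_region
  for (const Region& r : heap) {
    if (r.type == NotHumongous) {
      sh_region = nullptr;
    } else if (r.type == StartsHumongous) {
      sh_region = &r;
    } else if (r.start != sh_region) {      // ContinuesHumongous
      ++failures;                           // broken back-pointer
    }
  }
  return failures;
}

int main() {
  std::vector<Region> heap(4);
  heap[0] = {StartsHumongous, nullptr};
  heap[1] = {ContinuesHumongous, &heap[0]};  // correct back-pointer
  heap[2] = {ContinuesHumongous, &heap[0]};
  heap[3] = {NotHumongous, nullptr};
  assert(count_bad_humongous_links(heap) == 0);
  return 0;
}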
@@ -873,7 +873,6 @@ public:
  HeapRegion* region_at(size_t idx);

  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4). Assumes that this will be called
@@ -891,6 +890,10 @@ public:
                                       int worker,
                                       jint claim_value);

+#ifdef ASSERT
+  bool check_heap_region_claim_values(jint claim_value);
+#endif // ASSERT
+
  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);
@@ -2897,7 +2897,8 @@ public:
  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
-    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, 0);
+    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
+                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
@@ -2933,6 +2934,9 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) ChunkSize);
    _g1->workers()->run_task(&parKnownGarbageTask);
+    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
@@ -263,8 +263,7 @@ HeapRegion::new_dcto_closure(OopClosure* cl,
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
-  _humongous = false;
-  _humongous_start = false;
+  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;
@@ -284,7 +283,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
-    _claimed = 0;
+    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);
@@ -305,7 +304,7 @@ void HeapRegion::calc_gc_efficiency() {
// </PREDICTION>

void HeapRegion::set_startsHumongous() {
-  _humongous_start = true; _humongous = true;
+  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}
@@ -368,11 +367,11 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
-    _humongous(false), _humongous_start(false), _humongous_start_region(NULL),
+    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
-    _claimed(0), _evacuation_failed(false),
+    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _popularity(NotPopular),
    _young_type(NotYoung), _next_young_region(NULL),
@@ -426,7 +425,7 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
-  _humongous = true; _humongous_start = false;
+  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}
@@ -168,6 +168,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

+  enum HumongousType {
+    NotHumongous = 0,
+    StartsHumongous,
+    ContinuesHumongous
+  };
+
  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;
@@ -188,8 +194,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
  // sequence, otherwise -1.
  int _hrs_index;

-  bool _humongous;        // starts or continues a humongous object
-  bool _humongous_start;  // starts a humongous object
+  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
@@ -308,6 +313,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
    MaxAge = 2, NoOfAges = MaxAge+1
  };

+  enum ClaimValues {
+    InitialClaimValue     = 0,
+    FinalCountClaimValue  = 1,
+    NoteEndClaimValue     = 2,
+    ScrubRemSetClaimValue = 3
+  };
+
  // Concurrent refinement requires contiguous heap regions (in which TLABs
  // might be allocated) to be zero-filled. Each region therefore has a
  // zero-fill-state.
@@ -355,9 +367,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

-  bool isHumongous() const { return _humongous; }
-  bool startsHumongous() const { return _humongous_start; }
-  bool continuesHumongous() const { return _humongous && ! _humongous_start; }
+  bool isHumongous() const { return _humongous_type != NotHumongous; }
+  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
+  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }

  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
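Finally, note what the HumongousType refactoring above buys: with the old _humongous/_humongous_start pair of bools, the meaningless combination "starts a humongous object but is not humongous" was representable, while the single enum makes the three states mutually exclusive by construction. A compile-checked sketch of the equivalence (illustrative, outside HotSpot):

#include <cassert>

// The three humongous states as a single enum, with the predicates from
// the diff above; the old two-bool encoding allowed a fourth, invalid
// state that this representation cannot express.
enum HumongousType { NotHumongous = 0, StartsHumongous, ContinuesHumongous };

struct Region {
  HumongousType humongous_type = NotHumongous;
  bool isHumongous()        const { return humongous_type != NotHumongous; }
  bool startsHumongous()    const { return humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return humongous_type == ContinuesHumongous; }
};

int main() {
  Region r;
  r.humongous_type = StartsHumongous;
  assert(r.isHumongous() && r.startsHumongous() && !r.continuesHumongous());
  return 0;
}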