Commit 2eeebfd8, authored by tonyp

Merge

@@ -820,6 +820,40 @@ public:
   }
 };
 
+class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
+  G1CollectedHeap*   _g1h;
+  UpdateRSOopClosure _cl;
+  int                _worker_i;
+public:
+  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
+    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _worker_i(worker_i),
+    _g1h(g1)
+  { }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (!r->continuesHumongous()) {
+      _cl.set_from(r);
+      r->oop_iterate(&_cl);
+    }
+    return false;
+  }
+};
+
+class ParRebuildRSTask: public AbstractGangTask {
+  G1CollectedHeap* _g1;
+public:
+  ParRebuildRSTask(G1CollectedHeap* g1)
+    : AbstractGangTask("ParRebuildRSTask"),
+      _g1(g1)
+  { }
+
+  void work(int i) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+                                         HeapRegion::RebuildRSClaimValue);
+  }
+};
+
 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                     size_t word_size) {
   ResourceMark rm;
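For readers unfamiliar with the closure pattern used in the hunk above: RebuildRSOutOfRegionClosure is a HeapRegionClosure, a callback object that the heap hands each region in turn, and the return value of doHeapRegion controls whether iteration stops early. A minimal, self-contained sketch of that protocol follows; Region, RegionClosure and iterate_regions are illustrative stand-ins, not the real HotSpot types.

    // Illustrative stand-ins only: not the actual HeapRegion / HeapRegionClosure.
    struct Region {
      int index;
    };

    struct RegionClosure {
      // Returning true asks the heap to stop iterating early; the rebuild
      // closure above always returns false so every region gets visited.
      virtual bool do_region(Region* r) = 0;
      virtual ~RegionClosure() {}
    };

    // Serial region iteration, analogous to heap_region_iterate(&closure).
    void iterate_regions(Region* regions, int n, RegionClosure* cl) {
      for (int i = 0; i < n; i++) {
        if (cl->do_region(&regions[i])) {
          break;  // closure signalled that it is done
        }
      }
    }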
@@ -926,24 +960,35 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
-    // sets, and clear all cards. Later we will also cards in the used
-    // portion of the heap after the resizing (which could be a shrinking.)
-    // We will also reset the GC time stamps of the regions.
+    // sets, and clear all cards. Later we will rebuild remembered
+    // sets. We will also reset the GC time stamps of the regions.
     PostMCRemSetClearClosure rs_clear(mr_bs());
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
     resize_if_necessary_after_full_collection(full ? 0 : word_size);
 
-    // Since everything potentially moved, we will clear all remembered
-    // sets, but also dirty all cards corresponding to used regions.
-    PostMCRemSetInvalidateClosure rs_invalidate(mr_bs());
-    heap_region_iterate(&rs_invalidate);
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
       _cg1r->clear_hot_cache();
     }
 
+    // Rebuild remembered sets of all regions.
+    if (ParallelGCThreads > 0) {
+      ParRebuildRSTask rebuild_rs_task(this);
+      assert(check_heap_region_claim_values(
+             HeapRegion::InitialClaimValue), "sanity check");
+      set_par_threads(workers()->total_workers());
+      workers()->run_task(&rebuild_rs_task);
+      set_par_threads(0);
+      assert(check_heap_region_claim_values(
+             HeapRegion::RebuildRSClaimValue), "sanity check");
+      reset_heap_region_claim_values();
+    } else {
+      RebuildRSOutOfRegionClosure rebuild_rs(this);
+      heap_region_iterate(&rebuild_rs);
+    }
+
     if (PrintGC) {
       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
     }
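The two asserts and the call to reset_heap_region_claim_values() above rely on G1's claim-value protocol, whose internals are not part of this diff: each region carries a claim value, a parallel worker processes a region only if it atomically moves that value from InitialClaimValue to the task-specific value (here RebuildRSClaimValue), and the values are reset afterwards. A minimal sketch under those assumptions follows; the field and function names are illustrative, not the real HotSpot ones.

    #include <atomic>

    // Illustrative values mirroring the claim-value enum in heapRegion.hpp.
    const int InitialClaimValue   = 0;
    const int RebuildRSClaimValue = 5;

    struct ClaimableRegion {
      std::atomic<int> claim_value{InitialClaimValue};

      // A worker owns the region only if it is the one that moves the claim
      // value from InitialClaimValue to the task-specific value.
      bool claim(int task_claim_value) {
        int expected = InitialClaimValue;
        return claim_value.compare_exchange_strong(expected, task_claim_value);
      }
    };

    // Each GC worker scans the whole region array but processes only the
    // regions it wins; once all workers finish, every region carries
    // RebuildRSClaimValue, which is what the second assert checks before
    // reset_heap_region_claim_values() restores InitialClaimValue.
    void rebuild_rs_worker(ClaimableRegion* regions, int num_regions) {
      for (int i = 0; i < num_regions; i++) {
        if (regions[i].claim(RebuildRSClaimValue)) {
          // ... rebuild the remembered-set contributions of regions[i] ...
        }
      }
    }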
@@ -105,33 +105,6 @@ StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   _g1->heap_region_iterate(&rc);
 }
 
-class UpdateRSOopClosure: public OopClosure {
-  HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
-  int _worker_i;
-public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
-    _from(NULL), _rs(rs), _worker_i(worker_i) {
-    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
-  }
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    assert(_from != NULL, "from region must be non-NULL");
-    _rs->par_write_ref(_from, p, _worker_i);
-  }
-
-  // Override: this closure is idempotent.
-  // bool idempotent() { return true; }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-};
-
 class UpdateRSOutOfRegionClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mr_bs;
@@ -215,3 +215,27 @@ public:
   int n() { return _n; };
   HeapWord* start_first() { return _start_first; }
 };
+
+class UpdateRSOopClosure: public OopClosure {
+  HeapRegion* _from;
+  HRInto_G1RemSet* _rs;
+  int _worker_i;
+public:
+  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+    _from(NULL), _rs(rs), _worker_i(worker_i) {
+    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
+  }
+
+  void set_from(HeapRegion* from) {
+    assert(from != NULL, "from region must be non-NULL");
+    _from = from;
+  }
+
+  virtual void do_oop(narrowOop* p);
+  virtual void do_oop(oop* p);
+
+  // Override: this closure is idempotent.
+  // bool idempotent() { return true; }
+  bool apply_to_weak_ref_discovered_field() { return true; }
+};
@@ -94,3 +94,12 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
     }
   }
 }
+
+inline void UpdateRSOopClosure::do_oop(narrowOop* p) {
+  guarantee(false, "NYI");
+}
+
+inline void UpdateRSOopClosure::do_oop(oop* p) {
+  assert(_from != NULL, "from region must be non-NULL");
+  _rs->par_write_ref(_from, p, _worker_i);
+}
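The out-of-line do_oop(oop*) above delegates to HRInto_G1RemSet::par_write_ref, which this diff does not show. Conceptually, rebuilding a remembered set means visiting every reference field in a region and recording the card of any field that points into a different region. A simplified, illustrative sketch of that filtering follows; the fixed-size regions and the printf stand in for the real card and remembered-set machinery, which may differ.

    #include <cstdint>
    #include <cstdio>

    // Assumed, simplified model: regions are fixed-size address ranges and a
    // "remembered-set entry" is just a line of output.
    const uintptr_t kRegionBytes = 1024 * 1024;   // illustrative region size

    static uintptr_t region_index(const void* addr) {
      return reinterpret_cast<uintptr_t>(addr) / kRegionBytes;
    }

    // Roughly what do_oop(oop* p) delegates to: p is a reference field inside
    // 'from_region'; if it points into a different region, the card holding p
    // must be recorded in the target region's remembered set.
    void par_write_ref_sketch(uintptr_t from_region, void** p, int worker_i) {
      void* obj = *p;
      if (obj == nullptr) {
        return;                                   // nothing to remember
      }
      uintptr_t to_region = region_index(obj);
      if (to_region != from_region) {
        std::printf("worker %d: record card of %p in remset of region %ju\n",
                    worker_i, static_cast<void*>(p),
                    static_cast<uintmax_t>(to_region));
      }
    }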
@@ -318,7 +318,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
     FinalCountClaimValue = 1,
     NoteEndClaimValue = 2,
     ScrubRemSetClaimValue = 3,
-    ParVerifyClaimValue = 4
+    ParVerifyClaimValue = 4,
+    RebuildRSClaimValue = 5
   };
 
   // Concurrent refinement requires contiguous heap regions (in which TLABs