Commit 9ef303d5 authored by tschatzl

8035401: Fix visibility of G1ParScanThreadState members

Summary: After JDK-8035400 there were several opportunities to tighten the visibility of members of the G1ParScanThreadState class.
Reviewed-by: brutisso, mgerdin
Parent e49ff7e1
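
In outline, the patch replaces the raw refs() accessor with a narrow queue_is_empty() query and moves the allocation helpers out of line so they can become private. A minimal stand-alone sketch of that encapsulation pattern, using hypothetical stand-in types rather than the real HotSpot classes:

    #include <vector>

    class ScanQueue {                       // stand-in for RefToScanQueue
     public:
      bool is_empty() const { return _tasks.empty(); }
      void push(int task)   { _tasks.push_back(task); }
      bool pop(int& task) {
        if (_tasks.empty()) return false;
        task = _tasks.back();
        _tasks.pop_back();
        return true;
      }
     private:
      std::vector<int> _tasks;
    };

    class ThreadState {                     // stand-in for G1ParScanThreadState
     public:
      // Before: callers wrote state.refs()->is_empty(), which coupled them to
      // the queue type. After: a narrow query, and _refs stays private.
      bool queue_is_empty() const { return _refs.is_empty(); }

      void push_on_queue(int task) { _refs.push(task); }

      void trim_queue() {                   // drain until locally empty
        int task;
        while (_refs.pop(task)) {
          // processing 'task' may push new entries onto _refs
        }
      }

     private:
      ScanQueue _refs;                      // no refs() accessor leaks this
    };
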
@@ -4689,24 +4689,10 @@ bool G1ParEvacuateFollowersClosure::offer_termination() {
 }
 
 void G1ParEvacuateFollowersClosure::do_void() {
-  StarTask stolen_task;
   G1ParScanThreadState* const pss = par_scan_state();
   pss->trim_queue();
   do {
-    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-      assert(pss->verify_task(stolen_task), "sanity");
-      if (stolen_task.is_narrow()) {
-        pss->deal_with_reference((narrowOop*) stolen_task);
-      } else {
-        pss->deal_with_reference((oop*) stolen_task);
-      }
-
-      // We've just processed a reference and we might have made
-      // available new entries on the queues. So we have to make sure
-      // we drain the queues as necessary.
-      pss->trim_queue();
-    }
+    pss->steal_and_trim_queue(queues());
   } while (!offer_termination());
 }
@@ -4752,8 +4738,7 @@ protected:
   }
 
 public:
-  G1ParTask(G1CollectedHeap* g1h,
-            RefToScanQueueSet *task_queues)
+  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
       _g1h(g1h),
       _queues(task_queues),
@@ -4851,7 +4836,7 @@ public:
         pss.print_termination_stats(worker_id);
       }
 
-      assert(pss.refs()->is_empty(), "should be empty");
+      assert(pss.queue_is_empty(), "should be empty");
 
       // Close the inner scope so that the ResourceMark and HandleMark
       // destructors are executed here and are included as part of the
@@ -5394,8 +5379,7 @@ public:
       pss.set_evac_failure_closure(&evac_failure_cl);
 
-      assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
-
+      assert(pss.queue_is_empty(), "both queue and overflow should be empty");
       G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
       G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
@@ -5453,7 +5437,7 @@ public:
       G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
       drain_queue.do_void();
 
       // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
-      assert(pss.refs()->is_empty(), "should be");
+      assert(pss.queue_is_empty(), "should be");
     }
   };
@@ -5520,7 +5504,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   pss.set_evac_failure_closure(&evac_failure_cl);
 
-  assert(pss.refs()->is_empty(), "pre-condition");
+  assert(pss.queue_is_empty(), "pre-condition");
 
   G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
   G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
@@ -5572,7 +5556,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   _gc_tracer_stw->report_gc_reference_stats(stats);
 
   // We have completed copying any necessary live referent objects.
-  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+  assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
@@ -69,6 +69,11 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
   _start = os::elapsedTime();
 }
 
+G1ParScanThreadState::~G1ParScanThreadState() {
+  retire_alloc_buffers();
+  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
+}
+
 void
 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
 {
@@ -139,14 +144,14 @@ void G1ParScanThreadState::trim_queue() {
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
-    while (refs()->pop_overflow(ref)) {
-      deal_with_reference(ref);
+    while (_refs->pop_overflow(ref)) {
+      dispatch_reference(ref);
     }
 
-    while (refs()->pop_local(ref)) {
-      deal_with_reference(ref);
+    while (_refs->pop_local(ref)) {
+      dispatch_reference(ref);
     }
-  } while (!refs()->is_empty());
+  } while (!_refs->is_empty());
 }
 
 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
@@ -249,3 +254,56 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
   }
   return obj;
 }
+
+HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
+  }
+  return obj;
+}
+
+void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
+  if (alloc_buffer(purpose)->contains(obj)) {
+    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
+           "should contain whole object");
+    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
+  } else {
+    CollectedHeap::fill_with_object(obj, word_sz);
+    add_to_undo_waste(word_sz);
+  }
+}
+
+HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+  if (obj != NULL) {
+    return obj;
+  }
+  return allocate_slow(purpose, word_sz);
+}
+
+void G1ParScanThreadState::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
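
Before moving on to the header, a compact sketch of the PLAB-style fast/slow allocation split seen in allocate()/allocate_slow() above; the types and the heap_alloc callback are hypothetical simplifications, but the waste check mirrors the word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct test:

    #include <cstddef>

    // Toy PLAB: a bump-pointer buffer with a fast path and a refill slow path.
    struct Plab {
      std::size_t* top = nullptr;
      std::size_t* end = nullptr;

      std::size_t* allocate(std::size_t words) {   // fast path: bump pointer
        if (top != nullptr && top + words <= end) {
          std::size_t* result = top;
          top += words;
          return result;
        }
        return nullptr;
      }
    };

    // Slow path: retire the old buffer and grab a new one only when the
    // request is small relative to the buffer (bounding per-buffer waste);
    // otherwise allocate the object directly from the shared heap.
    std::size_t* allocate_slow(Plab& plab, std::size_t words,
                               std::size_t plab_words, std::size_t waste_pct,
                               std::size_t* (*heap_alloc)(std::size_t)) {
      if (words * 100 < plab_words * waste_pct) {
        std::size_t* buf = heap_alloc(plab_words);  // refill the PLAB
        if (buf == nullptr) return nullptr;         // caller handles failure
        plab.top = buf;
        plab.end = buf + plab_words;
        return plab.allocate(words);
      }
      return heap_alloc(words);                     // large: bypass the PLAB
    }
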
@@ -39,7 +39,7 @@ class HeapRegion;
 class outputStream;
 
 class G1ParScanThreadState : public StackObj {
-protected:
+ private:
   G1CollectedHeap* _g1h;
   RefToScanQueue* _refs;
   DirtyCardQueue _dcq;
@@ -98,14 +98,10 @@ protected:
     }
   }
 
 public:
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
-  ~G1ParScanThreadState() {
-    retire_alloc_buffers();
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
-  }
+  ~G1ParScanThreadState();
 
-  RefToScanQueue* refs() { return _refs; }
   ageTable* age_table() { return &_age_table; }
 
   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
@@ -116,6 +112,8 @@ public:
   size_t undo_waste() const { return _undo_waste; }
 
 #ifdef ASSERT
+  bool queue_is_empty() const { return _refs->is_empty(); }
+
   bool verify_ref(narrowOop* ref) const;
   bool verify_ref(oop* ref) const;
   bool verify_task(StarTask ref) const;
@@ -123,56 +121,24 @@ public:
   template <class T> void push_on_queue(T* ref) {
     assert(verify_ref(ref), "sanity");
-    refs()->push(ref);
+    _refs->push(ref);
   }
 
   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
+ private:
+  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
+  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
+  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
+
+ public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
     _evac_failure_cl = evac_failure_cl;
   }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
+
+  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
 
   int* hash_seed() { return &_hash_seed; }
   uint queue_num() { return _queue_num; }
@@ -201,10 +167,8 @@ public:
     return os::elapsedTime() - _start;
   }
 
-  static void
-  print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void
-  print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
 
   size_t* surviving_young_words() {
     // We add on to hide entry 0 which accumulates surviving words for
@@ -213,15 +177,7 @@ public:
   }
 
 private:
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap]->words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                                 true /* end_of_gc */,
-                                                 false /* retain */);
-    }
-  }
+  void retire_alloc_buffers();
 
 #define G1_PARTIAL_ARRAY_MASK 0x2
@@ -254,39 +210,18 @@ public:
   inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
-  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
-    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-           "Reference should not be NULL here as such are never pushed to the task queue.");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-
-    // Although we never intentionally push references outside of the collection
-    // set, due to (benign) races in the claim mechanism during RSet scanning more
-    // than one thread might claim the same card. So the same card may be
-    // processed multiple times. So redo this check.
-    if (_g1h->in_cset_fast_test(obj)) {
-      oop forwardee;
-      if (obj->is_forwarded()) {
-        forwardee = obj->forwardee();
-      } else {
-        forwardee = copy_to_survivor_space(obj);
-      }
-      assert(forwardee != NULL, "forwardee should not be NULL");
-      oopDesc::encode_store_heap_oop(p, forwardee);
-    }
-
-    assert(obj != NULL, "Must be");
-    update_rs(from, p, queue_num());
-  }
-
-public:
-
-  oop copy_to_survivor_space(oop const obj);
+  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
 
   template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-  inline void deal_with_reference(StarTask ref);
-
-public:
+  inline void dispatch_reference(StarTask ref);
+ public:
+
+  oop copy_to_survivor_space(oop const obj);
 
   void trim_queue();
+
+  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
@@ -43,6 +43,30 @@ template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p,
   }
 }
 
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+         "Reference should not be NULL here as such are never pushed to the task queue.");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card. So the same card may be
+  // processed multiple times. So redo this check.
+  if (_g1h->in_cset_fast_test(obj)) {
+    oop forwardee;
+    if (obj->is_forwarded()) {
+      forwardee = obj->forwardee();
+    } else {
+      forwardee = copy_to_survivor_space(obj);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+  }
+
+  assert(obj != NULL, "Must be");
+  update_rs(from, p, queue_num());
+}
+
 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop from_obj = clear_partial_array_mask(p);
@@ -104,7 +128,7 @@ template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
   }
 }
 
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
   assert(verify_task(ref), "sanity");
   if (ref.is_narrow()) {
     deal_with_reference((narrowOop*)ref);
@@ -113,5 +137,18 @@ inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
   }
 }
 
+void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+  StarTask stolen_task;
+  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+    assert(verify_task(stolen_task), "sanity");
+    dispatch_reference(stolen_task);
+
+    // We've just processed a reference and we might have made
+    // available new entries on the queues. So we have to make sure
+    // we drain the queues as necessary.
+    trim_queue();
+  }
+}
+
 #endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
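
As a closing illustration, a toy single-threaded model of the steal-and-trim loop that do_void() now delegates to steal_and_trim_queue(); the round-robin "steal" and the absence of the offer_termination() protocol are deliberate simplifications, not how G1's real task queue set behaves:

    #include <cstddef>
    #include <vector>

    // Toy model: each worker owns a stack of tasks; a worker first drains its
    // own queue, then steals from victims, re-draining after every steal
    // because a processed task may have produced new local work.
    struct Worker {
      std::vector<int> queue;

      void trim_queue() {
        while (!queue.empty()) {
          int task = queue.back();
          queue.pop_back();
          (void)task;  // process task; may push follow-up tasks onto 'queue'
        }
      }
    };

    // Analogue of steal_and_trim_queue(): alternate stealing and draining.
    void steal_and_trim(Worker& self, std::vector<Worker>& all, std::size_t self_idx) {
      for (std::size_t i = 0; i < all.size(); ++i) {
        if (i == self_idx) continue;
        while (!all[i].queue.empty()) {      // "steal" one task from a victim
          self.queue.push_back(all[i].queue.back());
          all[i].queue.pop_back();
          self.trim_queue();                 // drain newly available entries
        }
      }
    }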