Commit 0e9650db authored by J jcoomes

6989448: G1: refactor and simplify G1ParScanThreadState

Reviewed-by: iveresov, tonyp
Parent 37367381
@@ -3845,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
                      undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
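The new trim_queue() above boils the old hand-rolled double loop down to a simple drain pattern: pop the private overflow stack first so other workers can steal from the shared local queue, then pop the local queue, and repeat until both are empty. Below is a minimal standalone sketch of that pattern; the TaskQueue and process() names are invented for illustration and plain std containers stand in for HotSpot's StarTask machinery.

// Illustrative sketch only -- not HotSpot code.
#include <cstdio>
#include <deque>
#include <vector>

struct TaskQueue {
  std::deque<int>  local;     // shared queue other workers may steal from
  std::vector<int> overflow;  // private spill area, invisible to thieves

  bool pop_overflow(int& t) {
    if (overflow.empty()) return false;
    t = overflow.back(); overflow.pop_back(); return true;
  }
  bool pop_local(int& t) {
    if (local.empty()) return false;
    t = local.back(); local.pop_back(); return true;
  }
  bool is_empty() const { return local.empty() && overflow.empty(); }
};

static void process(int task) {
  // Processing a task may push follow-up work; here we just print it.
  std::printf("processed %d\n", task);
}

// Same shape as the new trim_queue(): drain the private overflow area first,
// then the local queue, and repeat until both are empty.
static void trim_queue(TaskQueue& q) {
  int task;
  do {
    while (q.pop_overflow(task)) process(task);
    while (q.pop_local(task))    process(task);
  } while (!q.is_empty());
}

int main() {
  TaskQueue q;
  q.local    = {1, 2, 3};
  q.overflow = {10, 11};
  trim_queue(q);
  return 0;
}

Single-threaded and simplified, but the drain order matches the rationale removed from the old trim_queue(): private ("invisible") work first, stealable ("visible") work second.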
@@ -4047,38 +4095,39 @@ public:
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
-      pss->trim_queue();
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
-    }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
-};
+  void do_void();
+
+private:
+  inline bool offer_termination();
+};
+
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->push_on_queue((narrowOop*) stolen_task);
+      } else {
+        pss->push_on_queue((oop*) stolen_task);
+      }
+      pss->trim_queue();
+    }
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
 
 class G1ParTask : public AbstractGangTask {
 protected:
@@ -4177,8 +4226,7 @@ public:
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
 
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
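The refactored do_void() above splits the termination handshake into offer_termination() and turns the work loop into: trim your own queue, steal from other workers (re-trimming after each successful steal), and only offer termination once stealing fails. Below is a rough single-threaded sketch of that shape; Worker, steal(), and Terminator are invented stand-ins for G1's task-queue set and ParallelTaskTerminator.

// Illustrative sketch only -- not HotSpot code.
#include <cstdio>
#include <vector>

struct Terminator {
  // In the real protocol this blocks until all workers agree (and a worker
  // that finds more work withdraws); single-threaded here, so it succeeds.
  bool offer_termination() { return true; }
};

struct Worker {
  std::vector<int> queue;                 // this worker's own tasks
  std::vector<std::vector<int>>* others;  // queues we may steal from

  void trim_queue() {
    while (!queue.empty()) {
      int task = queue.back(); queue.pop_back();
      std::printf("worker processed %d\n", task);
    }
  }
  bool steal(int& task) {
    for (auto& q : *others) {
      if (!q.empty()) { task = q.back(); q.pop_back(); return true; }
    }
    return false;
  }
  // Same shape as the new do_void(): trim, steal, re-trim, then offer
  // termination only when no more work can be stolen.
  void evacuate_followers(Terminator& term) {
    int stolen;
    trim_queue();
    do {
      while (steal(stolen)) {
        queue.push_back(stolen);  // analogue of push_on_queue(stolen_task)
        trim_queue();
      }
    } while (!term.offer_termination());
  }
};

int main() {
  std::vector<std::vector<int>> victims = {{7, 8}, {}, {9}};
  Worker w{{1, 2}, &victims};
  Terminator t;
  w.evacuate_followers(t);
  return 0;
}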
......
@@ -1651,49 +1651,17 @@ public:
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const         { return _undo_waste; }
 
-  template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
-#ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
-    refs()->push(ref);
-  }
-
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
+#ifdef ASSERT
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
+    refs()->push(ref);
+  }
 
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
@@ -1818,59 +1786,15 @@ private:
     }
   }
 
-public:
-
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
-    }
-  }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
+    }
+  }
+
+public:
+  void trim_queue();
 };
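A side note on the pattern used by push_on_queue() and deal_with_reference() above: the verification helpers return bool precisely so they can be wrapped in assert(...), keeping the checks in debug builds while compiling them out of product builds. Below is a small self-contained sketch of that idiom with a made-up WorkQueue class and an arbitrary 0..1023 "reserved range" check standing in for is_in_g1_reserved(); note that standard assert takes a single argument, unlike HotSpot's two-argument macro.

// Illustrative sketch only -- not HotSpot code.
#include <cassert>
#include <cstdio>
#include <vector>

class WorkQueue {
 public:
  // Returns true so it can be used as assert(verify_ref(ref)).
  bool verify_ref(const int* ref) const {
    assert(ref != nullptr && "invariant");
    assert(*ref >= 0 && *ref < 1024 && "ref must point into the reserved range");
    return true;
  }

  void push_on_queue(int* ref) {
    assert(verify_ref(ref));  // entire check disappears when NDEBUG is defined
    _refs.push_back(ref);
  }

 private:
  std::vector<int*> _refs;
};

int main() {
  WorkQueue q;
  int value = 42;
  q.push_on_queue(&value);
  std::printf("pushed one ref\n");
  return 0;
}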