Commit 2d701762 authored by johnc

6983204: G1: Nightly test nsk/regression/b4958615 failing with +ExplicitGCInvokesConcurrent

Summary: Enable reference discovery during concurrent marking by setting the reference processor field of the concurrent marking closure. Keep reference objects on the discovered reference lists alive during incremental evacuation pauses until they are processed at the end of concurrent marking.
Reviewed-by: ysr, tonyp
Parent 9980e6a5
......@@ -1051,6 +1051,7 @@ public:
void work(int worker_i) {
assert(Thread::current()->is_ConcurrentGC_thread(),
"this should only be done by a conc GC thread");
ResourceMark rm;
double start_vtime = os::elapsedVTime();
......@@ -1888,6 +1889,9 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
ReferenceProcessor* rp = g1h->ref_processor();
// See the comment in G1CollectedHeap::ref_processing_init()
// about how reference processing currently works in G1.
// Process weak references.
rp->setup_policy(clear_all_soft_refs);
assert(_markStack.isEmpty(), "mark stack should be empty");
......@@ -2918,7 +2922,11 @@ public:
CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
: _g1h(g1h), _cm(cm), _task(task) { }
: _g1h(g1h), _cm(cm), _task(task)
{
_ref_processor = g1h->ref_processor();
assert(_ref_processor != NULL, "should not be NULL");
}
};
void CMTask::setup_for_region(HeapRegion* hr) {
......
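The CMOopClosure change above (concurrentMark.cpp) is the first half of the fix: the concurrent marking closure now carries the heap's ReferenceProcessor, so Reference objects reached during concurrent marking can actually be discovered. Below is a minimal, self-contained C++ sketch of that wiring; the RefProc and MarkingClosure names are invented for illustration and are not HotSpot types.

#include <cassert>
#include <cstdio>

// Stand-in for ReferenceProcessor: records a discovery when discovery is enabled.
struct RefProc {
  bool discovery_enabled;
  RefProc() : discovery_enabled(false) {}
  bool discover(const char* ref) {
    if (!discovery_enabled) return false;   // e.g. during an ordinary pause
    std::printf("discovered %s\n", ref);
    return true;
  }
};

// Stand-in for CMOopClosure: discovery only happens if a processor was installed.
struct MarkingClosure {
  RefProc* _ref_processor;
  explicit MarkingClosure(RefProc* rp) : _ref_processor(rp) {
    assert(_ref_processor != NULL && "should not be NULL");
  }
  void do_reference(const char* ref) {
    if (_ref_processor != NULL) _ref_processor->discover(ref);
  }
};

int main() {
  RefProc rp;
  rp.discovery_enabled = true;    // discovery turned on at initial mark
  MarkingClosure cl(&rp);         // the fix: install the processor in the closure
  cl.do_reference("WeakReference@0x42");
  return 0;
}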
......@@ -1251,6 +1251,9 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
g1_policy()->set_full_young_gcs(true);
}
// See the comment in G1CollectedHeap::ref_processing_init() about
// how reference processing currently works in G1.
// Temporarily make reference _discovery_ single threaded (non-MT).
ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
......@@ -2012,6 +2015,24 @@ jint G1CollectedHeap::initialize() {
}
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
// * There is only one reference processor instance that
// 'spans' the entire heap. It is created by the code
// below.
// * Reference discovery is not enabled during an incremental
// pause (see 6484982).
// * Discovered refs are not enqueued nor are they processed
// during an incremental pause (see 6484982).
// * Reference discovery is enabled at initial marking.
// * Reference discovery is disabled and the discovered
// references processed etc during remarking.
// * Reference discovery is MT (see below).
// * Reference discovery requires a barrier (see below).
// * Reference processing is currently not MT (see 6608385).
// * A full GC enables (non-MT) reference discovery and
// processes any discovered references.
SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
_ref_processor = ReferenceProcessor::create_ref_processor(
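The bulleted comment added to ref_processing_init() (g1CollectedHeap.cpp) amounts to a small lifecycle for the single heap-spanning reference processor: discovery on at initial mark, discovered lists left untouched during incremental pauses, discovery off and the lists processed at remark (or during a full GC). A hedged, stand-alone model of that lifecycle follows; RefProcModel and its methods are made up for illustration, not G1's API.

#include <cstdio>
#include <cstddef>
#include <string>
#include <vector>

// Toy model of the single reference processor described in the comment above.
struct RefProcModel {
  bool discovery_enabled;
  std::vector<std::string> discovered;   // stands in for the discovered ref lists
  RefProcModel() : discovery_enabled(false) {}

  void enable_discovery()  { discovery_enabled = true;  }
  void disable_discovery() { discovery_enabled = false; }

  void maybe_discover(const std::string& ref) {
    if (discovery_enabled) discovered.push_back(ref);
  }
  // Remark / full GC: process and drop everything that was discovered.
  void process_discovered_references() {
    for (std::size_t i = 0; i < discovered.size(); ++i)
      std::printf("processing %s\n", discovered[i].c_str());
    discovered.clear();
  }
};

int main() {
  RefProcModel rp;

  rp.enable_discovery();                 // initial mark: discovery turned on
  rp.maybe_discover("SoftReference#1");  // found by a concurrent marking thread

  // Incremental (evacuation) pause: the lists are neither enqueued nor
  // processed; the entries merely have to stay alive until remark.
  rp.maybe_discover("WeakReference#2");

  rp.disable_discovery();                // remark: discovery off ...
  rp.process_discovered_references();    // ... and the discovered refs processed
  return 0;
}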
......@@ -3231,6 +3252,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
COMPILER2_PRESENT(DerivedPointerTable::clear());
// Please see comment in G1CollectedHeap::ref_processing_init()
// to see how reference processing currently works in G1.
//
// We want to turn off ref discovery, if necessary, and turn it back on
// again later if we do. XXX Dubious: why is discovery disabled?
bool was_enabled = ref_processor()->discovery_enabled();
......@@ -3660,6 +3684,7 @@ void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
// untag the GC alloc regions and tear down the GC alloc region
// list. It's desirable that no regions are tagged as GC alloc
// outside GCs.
forget_alloc_region_list();
// The current alloc regions contain objs that have survived
......@@ -4665,6 +4690,10 @@ g1_process_strong_roots(bool collecting_perm_gen,
}
// Finish with the ref_processor roots.
if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
// We need to treat the discovered reference lists as roots and
// keep entries (which are added by the marking threads) on them
// live until they can be processed at the end of marking.
ref_processor()->weak_oops_do(scan_non_heap_roots);
ref_processor()->oops_do(scan_non_heap_roots);
}
g1_policy()->record_collection_pause_end_G1_strong_roots();
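The g1_process_strong_roots hunk above (g1CollectedHeap.cpp) is the "keep them alive" half of the fix: during an evacuation pause the discovered reference lists are scanned as if they were roots, so the Reference objects parked on them by the marking threads survive until they can be processed at the end of marking. A simplified stand-alone illustration is below; the closure and processor names are invented and the "live set" stands in for evacuation, so this is only a sketch of the idea.

#include <cstdio>
#include <cstddef>
#include <set>
#include <string>
#include <vector>

typedef std::string Obj;   // toy "oop"

// Toy closure applied to every root; it records the object as live, playing
// the role of scan_non_heap_roots in the hunk above.
struct KeepAliveClosure {
  std::set<Obj>* live;
  void do_oop(const Obj& o) { live->insert(o); }
};

// Toy reference processor: oops_do visits every discovered entry.
struct RefProcModel {
  std::vector<Obj> discovered;
  void oops_do(KeepAliveClosure* cl) {
    for (std::size_t i = 0; i < discovered.size(); ++i) cl->do_oop(discovered[i]);
  }
};

int main() {
  RefProcModel rp;
  rp.discovered.push_back("WeakReference#7");   // added by a marking thread

  std::set<Obj> live;
  KeepAliveClosure keep = { &live };

  rp.oops_do(&keep);   // treat the discovered list entries as roots

  // WeakReference#7 is now in the live set, so an evacuation pause would copy
  // it rather than lose it before remark gets a chance to process it.
  std::printf("kept alive: %zu object(s)\n", live.size());
  return 0;
}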
......@@ -4730,6 +4759,11 @@ void G1CollectedHeap::evacuate_collection_set() {
// on individual heap regions when we allocate from
// them in parallel, so this seems like the correct place for this.
retire_all_alloc_regions();
// Weak root processing.
// Note: when JSR 292 is enabled and code blobs can contain
// non-perm oops then we will need to process the code blobs
// here too.
{
G1IsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
......
......@@ -770,9 +770,8 @@ void ReferenceProcessor::abandon_partial_discovery() {
// loop over the lists
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr(
"\nAbandoning %s discovered list",
list_name(i));
gclog_or_tty->print_cr("\nAbandoning %s discovered list",
list_name(i));
}
abandon_partial_discovered_list(_discoveredSoftRefs[i]);
}
......@@ -1059,9 +1058,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
// During a multi-threaded discovery phase,
// each thread saves to its "own" list.
Thread* thr = Thread::current();
assert(thr->is_GC_task_thread(),
"Dubious cast from Thread* to WorkerThread*?");
id = ((WorkerThread*)thr)->id();
id = thr->as_Worker_thread()->id();
} else {
// single-threaded discovery, we save in round-robin
// fashion to each of the lists.
......@@ -1095,8 +1092,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
ShouldNotReachHere();
}
if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
id, list);
gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
}
return list;
}
......@@ -1135,6 +1131,11 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, current_head);
}
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
......@@ -1239,8 +1240,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// Check assumption that an object is not potentially
// discovered twice except by concurrent collectors that potentially
// trace the same Reference object twice.
assert(UseConcMarkSweepGC,
"Only possible with an incremental-update concurrent collector");
assert(UseConcMarkSweepGC || UseG1GC,
"Only possible with a concurrent marking collector");
return true;
}
}
......@@ -1293,26 +1294,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
}
list->set_head(obj);
list->inc_length(1);
}
// In the MT discovery case, it is currently possible to see
// the following message multiple times if several threads
// discover a reference about the same time. Only one will
// however have actually added it to the discovered queue.
// One could let add_to_discovered_list_mt() return an
// indication for success in queueing (by 1 thread) or
// failure (by all other threads), but I decided the extra
// code was not worth the effort for something that is
// only used for debugging support.
if (TraceReferenceGC) {
oop referent = java_lang_ref_Reference::referent(obj);
if (PrintGCDetails) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
obj, obj->blueprint()->internal_name());
}
assert(referent->is_oop(), "Enqueued a bad referent");
}
assert(obj->is_oop(), "Enqueued a bad reference");
assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
return true;
}
......
......@@ -78,6 +78,8 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;
class WorkerThread;
// Class hierarchy
// - Thread
// - NamedThread
......@@ -289,6 +291,10 @@ class Thread: public ThreadShadow {
virtual bool is_Watcher_thread() const { return false; }
virtual bool is_ConcurrentGC_thread() const { return false; }
virtual bool is_Named_thread() const { return false; }
virtual bool is_Worker_thread() const { return false; }
// Casts
virtual WorkerThread* as_Worker_thread() const { return NULL; }
virtual char* name() const { return (char*)"Unknown thread"; }
......@@ -628,9 +634,16 @@ class WorkerThread: public NamedThread {
private:
uint _id;
public:
WorkerThread() : _id(0) { }
void set_id(uint work_id) { _id = work_id; }
uint id() const { return _id; }
WorkerThread() : _id(0) { }
virtual bool is_Worker_thread() const { return true; }
virtual WorkerThread* as_Worker_thread() const {
assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
return (WorkerThread*) this;
}
void set_id(uint work_id) { _id = work_id; }
uint id() const { return _id; }
};
// A single WatcherThread is used for simulating timer interrupts.
......
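The thread.hpp hunk above replaces the raw (WorkerThread*) cast previously used in ReferenceProcessor::get_discovered_list() with a virtual query-and-cast pair: is_Worker_thread() plus a checked as_Worker_thread() accessor. A compilable stand-alone sketch of that idiom follows; it is heavily simplified relative to HotSpot's real Thread hierarchy.

#include <cassert>

class WorkerThread;

class Thread {
 public:
  virtual ~Thread() {}
  virtual bool is_Worker_thread() const { return false; }
  // Checked cast: the base returns NULL; the override asserts before downcasting.
  virtual WorkerThread* as_Worker_thread() const { return 0; }
};

class WorkerThread : public Thread {
 private:
  unsigned _id;
 public:
  WorkerThread() : _id(0) {}
  virtual bool is_Worker_thread() const { return true; }
  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread() && "Dubious cast to WorkerThread*?");
    return const_cast<WorkerThread*>(this);
  }
  void set_id(unsigned work_id) { _id = work_id; }
  unsigned id() const { return _id; }
};

int main() {
  WorkerThread wt;
  wt.set_id(3);
  Thread* thr = &wt;
  // Replaces the old raw cast "id = ((WorkerThread*)thr)->id();":
  unsigned id = thr->as_Worker_thread()->id();
  return id == 3 ? 0 : 1;
}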