Commit 652e6217 authored by tonyp

7129892: G1: explicit marking cycle initiation might fail to initiate a marking cycle

Summary: If we try to schedule an initial-mark GC in order to explicitly start a concurrent marking cycle and it gets pre-empted by another GC, we should retry the attempt as long as it is appropriate for the GC cause.
Reviewed-by: brutisso, johnc
Parent a25a22c6
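The core of the fix is a retry loop around the attempt to schedule the initial-mark pause: if the pause gets pre-empted by another GC, the request is retried unless a Full GC has already collected the whole heap in the meantime. The following is a minimal, self-contained sketch of that control flow in plain C++; PauseOp, schedule_initial_mark_pause and collect_concurrent_cycle are illustrative stand-ins, not the real VM types (VM_G1IncCollectionPause, VMThread, G1CollectedHeap::collect).

#include <cstdio>

struct PauseOp {
  bool succeeded;            // did our initial-mark pause actually run?
  unsigned full_gcs_after;   // full-GC count observed after the attempt
};

// Stand-in for VMThread::execute(&op) scheduling an initial-mark pause.
// Pretend the first attempt is pre-empted by another GC and the second succeeds.
PauseOp schedule_initial_mark_pause(unsigned attempt) {
  return PauseOp{attempt > 0, /* full_gcs_after = */ 0};
}

void collect_concurrent_cycle() {
  unsigned full_gc_count_before = 0;  // in the VM this is read under the Heap_lock
  bool retry_gc;
  unsigned attempt = 0;

  do {
    retry_gc = false;
    PauseOp op = schedule_initial_mark_pause(attempt++);

    if (!op.succeeded) {
      if (full_gc_count_before == op.full_gcs_after) {
        // Pre-empted by a non-full GC: the marking cycle never started, so retry.
        retry_gc = true;
      } else {
        // A Full GC ran in the meantime; the whole heap was collected anyway,
        // so there is no point in starting a new concurrent cycle.
      }
    }
  } while (retry_gc);

  std::printf("initial-mark pause scheduled after %u attempt(s)\n", attempt);
}

int main() { collect_concurrent_cycle(); }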
@@ -958,7 +958,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -976,7 +976,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -1031,7 +1031,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   // the check before we do the actual allocation. The reason for doing it
   // before the allocation is that we avoid having to keep track of the newly
   // allocated memory while we do a GC.
-  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
     collect(GCCause::_g1_humongous_allocation);
   }
@@ -1059,7 +1060,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -1081,7 +1082,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -2311,10 +2312,12 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
 }
 
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-      cause == GCCause::_g1_humongous_allocation);
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }
 
 #ifndef PRODUCT
@@ -2408,17 +2411,21 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
 }
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+  assert_heap_not_locked();
 
   unsigned int gc_count_before;
   unsigned int full_gc_count_before;
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
+    gc_count_before = total_collections();
+    full_gc_count_before = total_full_collections();
   }
 
   if (should_do_concurrent_full_gc(cause)) {
@@ -2431,6 +2438,20 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
                                g1_policy()->max_pause_time_ms(),
                                cause);
     VMThread::execute(&op);
+
+    if (!op.pause_succeeded()) {
+      // Another GC got scheduled and prevented us from scheduling
+      // the initial-mark GC. It's unlikely that the GC that
+      // pre-empted us was also an initial-mark GC. So, we'll retry
+      // the initial-mark GC.
+      if (full_gc_count_before == total_full_collections()) {
+        retry_gc = true;
+      } else {
+        // A Full GC happened while we were trying to schedule the
+        // initial-mark GC. No point in starting a new cycle given
+        // that the whole heap was collected anyway.
+      }
+    }
   } else {
     if (cause == GCCause::_gc_locker
         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
@@ -2449,6 +2470,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
       VMThread::execute(&op);
     }
   }
+  } while (retry_gc);
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
@@ -3149,12 +3171,12 @@ void G1CollectedHeap::verify(bool allow_dirty,
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
 
     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
-                         SharedHeap::ScanningOption(so),  // roots scanning options
+                         ScanningOption(so),              // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
@@ -4734,7 +4756,7 @@ public:
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
...
@@ -770,7 +770,7 @@ protected:
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
   void g1_process_strong_roots(bool collecting_perm_gen,
-                               SharedHeap::ScanningOption so,
+                               ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,
...