Commit ad3b6f69 authored by ysr

Merge

@@ -1007,9 +1007,9 @@ class StubGenerator: public StubCodeGenerator {
       __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
     __ delayed()->cmp(to_from, byte_count);
     if (NOLp == NULL)
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
     else
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
+      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
     __ delayed()->nop();
   }
......
@@ -234,6 +234,11 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
+    // maybe we should change the condition to test _gc_cause ==
+    // GCCause::_java_lang_system_gc, instead of
+    // _gc_cause != GCCause::_gc_locker
+    assert(_gc_cause == GCCause::_java_lang_system_gc,
+           "the only way to get here is if this was a System.gc()-induced GC");
     assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
     // but do so in native mode, because we want to lock the
......
@@ -266,6 +266,12 @@ void ConcurrentMarkThread::run() {
         _cm->clearNextBitmap();
         _sts.leave();
       }
+
+      // Update the number of full collections that have been
+      // completed. This will also notify the FullGCCount_lock in case a
+      // Java thread is waiting for a full GC to happen (e.g., it
+      // called System.gc() with +ExplicitGCInvokesConcurrent).
+      g1->increment_full_collections_completed(true /* outer */);
     }
     assert(_should_terminate, "just checking");
......
@@ -809,7 +809,8 @@ public:
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +822,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
@@ -837,9 +834,11 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +943,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -1009,13 +1008,18 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
            "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true,                /* explicit_gc */
+                clear_all_soft_refs,
+                0 /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1335,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
@@ -1689,6 +1694,51 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
+  // waiting for a full GC to finish will be woken up. It is waiting
+  // in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1709,25 +1759,41 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
-      VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
+      VMThread::execute(&op);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
@@ -1989,6 +2055,11 @@ void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
@@ -2481,11 +2552,13 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
@@ -2493,7 +2566,10 @@ void G1CollectedHeap::do_collection_pause() {
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
     VMThread::execute(&op);
   }
 }
@@ -2612,7 +2688,7 @@ struct PrepareForRSScanningClosure : public HeapRegionClosure {
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
@@ -2637,8 +2713,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     else
       strcat(verbose_str, "(partial)");
   }
-  if (g1_policy()->during_initial_mark_pause())
+  if (g1_policy()->during_initial_mark_pause()) {
     strcat(verbose_str, " (initial-mark)");
+    // We are about to start a marking cycle, so we increment the
+    // full collection counter.
+    increment_total_full_collections();
+  }
 
   // if PrintGCDetails is on, we'll print long statistics information
   // in the collector policy code, so let's not print this as the output
@@ -2661,7 +2741,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
            "young list should be well formed");
   }
 
-  bool abandoned = false;
   { // Call to jvmpi::post_class_unload_events must occur outside of active GC
     IsGCActiveMark x;
@@ -2743,7 +2822,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       // Now choose the CS. We may abandon a pause if we find no
       // region that will fit in the MMU pause.
-      bool abandoned = g1_policy()->choose_collection_set();
+      bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
......
@@ -277,6 +277,18 @@ private:
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
 
+  // It decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "full collections" (i.e., Full GCs or
+  // concurrent cycles) we have completed. The number of them we have
+  // started is maintained in _total_full_collections in CollectedHeap.
+  volatile unsigned int _full_collections_completed;
+
 protected:
 
   // Returns "true" iff none of the gc alloc regions have any allocations
@@ -356,13 +368,14 @@ protected:
   // GC pause.
   void retire_alloc_region(HeapRegion* alloc_region, bool par);
 
-  // Helper function for two callbacks below.
-  // "full", if true, indicates that the GC is for a System.gc() request,
-  // and should collect the entire heap. If "clear_all_soft_refs" is true,
-  // all soft references are cleared during the GC. If "full" is false,
-  // "word_size" describes the allocation that the GC should
-  // attempt (at least) to satisfy.
-  void do_collection(bool full, bool clear_all_soft_refs,
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  //   inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references are cleared
+  //   during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  //   the GC should attempt (at least) to satisfy
+  void do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
                      size_t word_size);
 
   // Callback from VM_G1CollectFull operation.
@@ -431,6 +444,26 @@ public:
            _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of full collections completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The outer parameter is a boolean to help us do a bit tighter
+  // consistency checking in the method. If outer is false, the caller
+  // is the inner caller in the nesting (i.e., the Full GC). If outer
+  // is true, the caller is the outer caller in this nesting (i.e.,
+  // the concurrent cycle). Further nesting is not currently
+  // supported. The end of this call also notifies the
+  // FullGCCount_lock in case a Java thread is waiting for a full GC
+  // to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_full_collections_completed(bool outer);
+
+  unsigned int full_collections_completed() {
+    return _full_collections_completed;
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -444,7 +477,7 @@ protected:
   // The guts of the incremental collection pause, executed by the vm
   // thread.
-  virtual void do_collection_pause_at_safepoint();
+  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   virtual void evacuate_collection_set();
......
@@ -154,7 +154,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _known_garbage_bytes(0),
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _target_pause_time_ms(-1.0),
 
   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -1635,8 +1634,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
-
-  _target_pause_time_ms = -1.0;
 }
 
 // <NEW PREDICTION>
@@ -2366,7 +2363,6 @@ G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
     if (reached_target_length) {
       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
-      _target_pause_time_ms = max_pause_time_ms;
       return true;
     }
   } else {
@@ -2398,6 +2394,17 @@ bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
 }
 #endif
 
+bool
+G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    set_initiate_conc_mark_if_possible();
+    return true;
+  } else {
+    return false;
+  }
+}
+
 void
 G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
@@ -2864,7 +2871,8 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
 #endif // !PRODUCT
 
 bool
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
+                                                  double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
@@ -2877,26 +2885,19 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0
-            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
-            "_target_pause_time_ms should have been set!");
-#ifndef PRODUCT
-  if (_target_pause_time_ms <= -1.0) {
-    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
-    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  }
-#endif
-  assert(_collection_set == NULL, "Precondition");
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
 
-  double target_time_ms = _target_pause_time_ms;
-  double time_remaining_ms = target_time_ms - base_time_ms;
+  double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10*target_time_ms) {
-    time_remaining_ms = 0.50 * target_time_ms;
+  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+    time_remaining_ms = 0.50 * target_pause_time_ms;
     _within_target = false;
   } else {
     _within_target = true;
@@ -3059,7 +3060,18 @@ choose_collection_set_end:
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 
-  return abandon_collection;
+  // Here we are supposed to return whether the pause should be
+  // abandoned or not (i.e., whether the collection set is empty or
+  // not). However, this introduces a subtle issue when a pause is
+  // initiated explicitly with System.gc() and
+  // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it's
+  // supposed to start a marking cycle, and it's abandoned. So, by
+  // returning false here we are telling the caller never to consider
+  // a pause to be abandoned. We'll actually remove all the code
+  // associated with abandoned pauses as part of CR 6963209, but we are
+  // just disabling them this way for the moment to avoid increasing
+  // further the amount of changes for CR 6944166.
+  return false;
 }
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
......
@@ -199,8 +199,6 @@ protected:
   size_t _young_cset_length;
   bool   _last_young_gc_full;
 
-  double _target_pause_time_ms;
-
   unsigned _full_young_pause_num;
   unsigned _partial_young_pause_num;
@@ -526,6 +524,10 @@ public:
     return _mmu_tracker;
   }
 
+  double max_pause_time_ms() {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
   double predict_init_time_ms() {
     return get_new_prediction(_concurrent_mark_init_times_ms);
   }
@@ -1008,7 +1010,7 @@ public:
   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.
-  virtual bool choose_collection_set() = 0;
+  virtual bool choose_collection_set(double target_pause_time_ms) = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1077,6 +1079,12 @@ public:
   void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
 
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It's best if it
+  // is called during a safepoint when the test whether a cycle is in
+  // progress or not is stable.
+  bool force_initial_mark_if_outside_cycle();
+
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
@@ -1259,7 +1267,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
   // If the estimated is less then desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual bool choose_collection_set();
+  virtual bool choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
......
@@ -42,8 +42,65 @@ void VM_G1CollectFull::doit() {
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
+
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that no one else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, no point in
+    // setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+}
+
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+          _full_collections_completed_before) {
+      // The following is largely copied from CMS
+
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+               _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
 }
 
 void VM_CGC_Operation::doit() {
......
@@ -31,13 +31,12 @@
 // - VM_G1PopRegionCollectionPause
 
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
 };
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
- public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool         _should_initiate_conc_mark;
+  double       _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int gc_count_before,
+                          bool should_initiate_conc_mark,
+                          double target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
......
@@ -367,4 +367,6 @@ vm_operations_g1.hpp vmGCOperations.hpp
 vm_operations_g1.cpp                    vm_operations_g1.hpp
 vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp                    g1CollectorPolicy.hpp
+vm_operations_g1.cpp                    interfaceSupport.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
@@ -86,9 +86,7 @@ class VM_GC_Operation: public VM_Operation {
     _gc_locked = false;
 
-    if (full) {
-      _full_gc_count_before = full_gc_count_before;
-    }
+    _full_gc_count_before = full_gc_count_before;
     // In ParallelScavengeHeap::mem_allocate() collections can be
     // executed within a loop and _all_soft_refs_clear can be set
     // true after they have been cleared by a collection and another
......
@@ -78,6 +78,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _g1_inc_collection_pause:
+      return "G1 Evacuation Pause";
+
     case _last_ditch_collection:
       return "Last ditch collection";
......
@@ -159,6 +159,8 @@ void mutex_init() {
   def(STS_init_lock              , Mutex,   leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                , Monitor, special,     true ); // CMS incremental mode start/stop notification
+  }
+  if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock         , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
......
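
For context, a minimal Java-side sketch of the behaviour this changeset enables (the class name, allocation sizes, and timing code are illustrative only and not part of the patch): with -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent, a System.gc() call is expected to schedule an initial-mark evacuation pause that starts a concurrent cycle, and the calling thread then waits in VM_G1IncCollectionPause::doit_epilogue() until full_collections_completed() has advanced past the value recorded before the pause.

// Hypothetical demo, run with:
//   java -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent ExplicitGCDemo
public class ExplicitGCDemo {
    public static void main(String[] args) {
        // Create some easily reclaimable garbage so the cycle has work to do.
        byte[][] junk = new byte[32][];
        for (int i = 0; i < junk.length; i++) {
            junk[i] = new byte[1024 * 1024];
        }
        junk = null;

        long start = System.nanoTime();
        // With the flags above this should trigger an initial-mark pause and
        // then block until the concurrent cycle (or a Full GC) completes.
        System.gc();
        long elapsedMs = (System.nanoTime() - start) / 1000000L;
        System.out.println("System.gc() returned after " + elapsedMs + " ms");
    }
}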