Commit bef9ec13 authored by: ysr

6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection

Summary: ScavengeALot now causes an incremental (but possibly partially young, in the G1 sense) collection. Some such collections may be abandoned on account of MMU specs. Band-aided a native leak associated with abandoned pauses, as well as an MMU tracker overflow related to frequent scavenge events in the face of a large MMU denominator interval; the latter is protected by a product flag that defaults to false.
Reviewed-by: tonyp
Parent df890d3a
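At a glance, the change boils down to a dispatch on the GC cause in G1CollectedHeap::collect(). The condensed sketch below simply restates the second hunk that follows, with indentation restored; all names come from the diff itself:

  // Condensed restatement of the new G1CollectedHeap::collect() logic
  // (see the hunks below for the actual change):
  switch (cause) {
    case GCCause::_scavenge_alot: {
      // ScavengeALot now requests an incremental pause, which the policy
      // may abandon in order to respect the MMU specification.
      VM_G1IncCollectionPause op(gc_count_before, cause);
      VMThread::execute(&op);
      break;
    }
    default: {
      // All other explicit causes still trigger a full collection.
      VM_G1CollectFull op(gc_count_before, cause);
      VMThread::execute(&op);
    }
  }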
@@ -1732,13 +1732,6 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
return car->free();
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
// The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
MutexLocker ml(Heap_lock);
collect_locked(cause);
}
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
assert(Thread::current()->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,17 +1748,31 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
// The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
// Don't want to do a GC until cleanup is completed.
wait_for_cleanup_complete();
// Read the GC count while holding the Heap_lock
int gc_count_before = SharedHeap::heap()->total_collections();
int gc_count_before;
{
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_G1CollectFull op(gc_count_before, cause);
VMThread::execute(&op);
MutexLocker ml(Heap_lock);
// Read the GC count while holding the Heap_lock
gc_count_before = SharedHeap::heap()->total_collections();
// Don't want to do a GC until cleanup is completed.
wait_for_cleanup_complete();
} // We give up heap lock; VMThread::execute gets it back below
switch (cause) {
case GCCause::_scavenge_alot: {
// Do an incremental pause, which might sometimes be abandoned.
VM_G1IncCollectionPause op(gc_count_before, cause);
VMThread::execute(&op);
break;
}
default: {
// In all other cases, we currently do a full gc.
VM_G1CollectFull op(gc_count_before, cause);
VMThread::execute(&op);
}
}
}
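For context, explicit GC requests reach this method through the generic CollectedHeap interface; a hypothetical call site (not part of this change, shown only to illustrate how the cause arrives here) might look like:

  // Hypothetical illustration only: an explicit GC request routed through
  // the generic heap interface lands in G1CollectedHeap::collect() above.
  Universe::heap()->collect(GCCause::_java_lang_system_gc);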
@@ -2649,8 +2656,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
if (g1_policy()->should_initiate_conc_mark())
strcat(verbose_str, " (initial-mark)");
GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
@@ -2802,6 +2807,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
_young_list->reset_auxilary_lists();
}
} else {
if (_in_cset_fast_test != NULL) {
assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
// this is more for peace of mind; we're nulling them here and
// we're expecting them to be null at the beginning of the next GC
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;
}
// This looks confusing, because the DPT should really be empty
// at this point -- since we have not done any collection work,
// there should not be any derived pointers in the table to update;
// however, there is some additional state in the DPT which is
// reset at the end of the (null) "gc" here via the following call.
// A better approach might be to split off that state resetting work
// into a separate method that asserts that the DPT is empty and call
// that here. That is deferred for now.
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
......
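The freeing above is the band-aid for the native leak mentioned in the commit summary: on an abandoned pause the in-collection-set fast-test array had already been allocated but was never released. A stand-alone sketch of the same pattern, using plain new[]/delete[] rather than HotSpot's C-heap macros (max_regions is an illustrative placeholder), is:

  // Sketch only (generic C++, not the FREE_C_HEAP_ARRAY macro used above):
  // the abandoned path must still release and null the scratch array so
  // the next GC starts from a clean state.
  bool* in_cset_fast_test_base = new bool[max_regions]();  // set up before the pause
  // ... the pause is abandoned before any collection work is done ...
  delete[] in_cset_fast_test_base;   // release on the abandoned path too
  in_cset_fast_test_base = NULL;     // expected to be NULL at the start of the next GC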
@@ -2847,8 +2847,15 @@ choose_collection_set() {
double non_young_start_time_sec;
start_recording_regions();
guarantee(_target_pause_time_ms > -1.0,
guarantee(_target_pause_time_ms > -1.0
NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
"_target_pause_time_ms should have been set!");
#ifndef PRODUCT
if (_target_pause_time_ms <= -1.0) {
assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
_target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
}
#endif
assert(_collection_set == NULL, "Precondition");
double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
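As a worked example of the NOT_PRODUCT fallback above (assuming the MMU tracker's max_gc_time() reports the allowed GC time per time slice in seconds, which the multiplication by 1000.0 implies): an allowance of 0.020 s per slice gives a fallback _target_pause_time_ms of 0.020 * 1000.0 = 20.0 ms.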
@@ -2994,7 +3001,3 @@ record_collection_pause_end(bool abandoned) {
G1CollectorPolicy::record_collection_pause_end(abandoned);
assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
@@ -86,12 +86,22 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
// increase the array size (:-)
// remove the oldest entry (this might allow more GC time for
// the time slice than what's allowed)
// concolidate the two entries with the minimum gap between them
// (this mighte allow less GC time than what's allowed)
guarantee(0, "array full, currently we can't recover");
// consolidate the two entries with the minimum gap between them
// (this might allow less GC time than what's allowed)
guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
"array full, currently we can't recover unless +G1ForgetfulMMUTracker");
// In the case where ScavengeALot is true, such overflow is not
// uncommon; in such cases, we can, without much loss of precision
// or performance (we are GC'ing most of the time anyway!),
// simply overwrite the oldest entry in the tracker: this
// is also the behaviour when G1ForgetfulMMUTracker is enabled.
_head_index = trim_index(_head_index + 1);
assert(_head_index == _tail_index, "Because we have a full circular buffer");
_tail_index = trim_index(_tail_index + 1);
} else {
_head_index = trim_index(_head_index + 1);
++_no_entries;
}
_head_index = trim_index(_head_index + 1);
++_no_entries;
_array[_head_index] = G1MMUTrackerQueueElem(start, end);
}
......
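The overwrite-the-oldest behaviour described above amounts to treating the fixed-size queue as a forgetful ring buffer. A self-contained sketch of that idea in generic C++ (not the HotSpot G1MMUTrackerQueue itself; Pause, ForgetfulRing, and N are illustrative names) is:

  #include <cstddef>

  struct Pause { double start; double end; };

  // Fixed-capacity ring buffer that, once full, silently drops its oldest
  // entry -- the "forgetful" behaviour enabled by +G1ForgetfulMMUTracker.
  template <std::size_t N>
  struct ForgetfulRing {
    Pause buf[N];
    std::size_t next  = 0;   // slot the next pause is written to
    std::size_t count = 0;   // number of live entries, at most N

    void add(const Pause& p) {
      buf[next] = p;
      next = (next + 1) % N;
      if (count < N) {
        ++count;             // still room: nothing is forgotten
      }
      // Once count == N, the write above overwrote the oldest pause, which
      // may understate recent GC time and so transiently violate the MMU spec.
    }
  };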
@@ -99,7 +99,10 @@ private:
// The array is of fixed size and I don't think we'll need more than
// two or three entries with the current behaviour of G1 pauses.
// If the array is full, an easy fix is to look for the pauses with
// the shortest gap between them and concolidate them.
// the shortest gap between them and consolidate them.
// For now, we have taken the expedient alternative of forgetting
// the oldest entry in the event that +G1ForgetfulMMUTracker, thus
// potentially violating MMU specs for some time thereafter.
G1MMUTrackerQueueElem _array[QueueLength];
int _head_index;
......
@@ -256,6 +256,9 @@
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size") \
\
product(bool, G1ForgetfulMMUTracker, false, \
"If the MMU tracker's memory is full, forget the oldest entry") \
\
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
......
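Assuming the standard -XX boolean flag syntax, the new product flag would presumably be enabled on the command line along with G1 (any additional flags needed to select G1 in this build are omitted), e.g.:

  java -XX:+UseG1GC -XX:+G1ForgetfulMMUTracker ...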
@@ -42,7 +42,7 @@ void VM_G1CollectFull::doit() {
void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
GCCauseSetter x(g1h, _gc_cause);
g1h->do_collection_pause_at_safepoint();
}
......
@@ -68,8 +68,9 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
class VM_G1IncCollectionPause: public VM_GC_Operation {
public:
VM_G1IncCollectionPause(int gc_count_before) :
VM_GC_Operation(gc_count_before) {}
VM_G1IncCollectionPause(int gc_count_before,
GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
virtual void doit();
virtual const char* name() const {
......
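With the defaulted second parameter above, existing call sites keep compiling unchanged while the ScavengeALot path can pass its own cause; for instance (gc_count_before as in the earlier hunks):

  // Existing callers: the cause defaults to GCCause::_g1_inc_collection_pause.
  VM_G1IncCollectionPause op1(gc_count_before);
  // New ScavengeALot path: the stress cause is propagated into the operation.
  VM_G1IncCollectionPause op2(gc_count_before, GCCause::_scavenge_alot);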
@@ -224,10 +224,6 @@ public:
CodeBlobClosure* code_roots,
OopClosure* non_root_closure);
// Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
virtual void collect_locked(GCCause::Cause cause) = 0;
// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual
// functions.
......