Commit 63d24ce8 authored by B brutisso

6814390: G1: remove the concept of non-generational G1

Summary: Removed the possibility to turn off generational mode for G1.
Reviewed-by: johnc, ysr, tonyp
Parent 37a0186e
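
Note: most of this change is mechanical. Each "if (g1_policy()->in_young_gc_mode()) { ... }" or "if (in_young_gc_mode())" guard is dropped and its body is kept unconditionally, since generational operation is now the only mode G1 supports; the backing field, its accessors and the G1Gen develop flag are deleted. A minimal, self-contained sketch of that pattern is below. It is illustrative only, not HotSpot code, and Policy / do_young_gc_work() are made-up stand-ins.

    // Illustrative sketch only (not HotSpot sources); "Policy" and
    // do_young_gc_work() are hypothetical stand-ins for the guarded bodies
    // touched by this commit.
    #include <cstdio>

    struct Policy {
      bool in_young_gc_mode;                    // field deleted by this commit
      void do_young_gc_work() { std::printf("young-gen bookkeeping\n"); }
    };

    // Before: young-generation bookkeeping ran only when the mode flag was set.
    void pause_work_before(Policy& p) {
      if (p.in_young_gc_mode) {
        p.do_young_gc_work();
      }
    }

    // After: the guard is gone and the body always runs, because generational
    // operation is the only mode left.
    void pause_work_after(Policy& p) {
      p.do_young_gc_work();
    }

    int main() {
      Policy p{true};
      pause_work_before(p);
      pause_work_after(p);
      return 0;
    }
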
@@ -801,39 +801,6 @@ void ConcurrentMark::checkpointRootsInitialPre() {
   reset();
 }
-class CMMarkRootsClosure: public OopsInGenClosure {
-private:
-  ConcurrentMark*  _cm;
-  G1CollectedHeap* _g1h;
-  bool             _do_barrier;
-public:
-  CMMarkRootsClosure(ConcurrentMark* cm,
-                     G1CollectedHeap* g1h,
-                     bool do_barrier) : _cm(cm), _g1h(g1h),
-                                        _do_barrier(do_barrier) { }
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      assert(obj->is_oop() || obj->mark() == NULL,
-             "expected an oop, possibly with mark word displaced");
-      HeapWord* addr = (HeapWord*)obj;
-      if (_g1h->is_in_g1_reserved(addr)) {
-        _cm->grayRoot(obj);
-      }
-    }
-    if (_do_barrier) {
-      assert(!_g1h->is_in_g1_reserved(p),
-             "Should be called on external roots");
-      do_barrier(p);
-    }
-  }
-};
 void ConcurrentMark::checkpointRootsInitialPost() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -868,50 +835,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
   // during it. No need to call it here.
 }
-// Checkpoint the roots into this generation from outside
-// this generation. [Note this initial checkpoint need only
-// be approximate -- we'll do a catch up phase subsequently.]
-void ConcurrentMark::checkpointRootsInitial() {
-  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  double start = os::elapsedTime();
-  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
-  g1p->record_concurrent_mark_init_start();
-  checkpointRootsInitialPre();
-  // YSR: when concurrent precleaning is in place, we'll
-  // need to clear the cached card table here
-  ResourceMark rm;
-  HandleMark hm;
-  g1h->ensure_parsability(false);
-  g1h->perm_gen()->save_marks();
-  CMMarkRootsClosure notOlder(this, g1h, false);
-  CMMarkRootsClosure older(this, g1h, true);
-  g1h->set_marking_started();
-  g1h->rem_set()->prepare_for_younger_refs_iterate(false);
-  g1h->process_strong_roots(true,    // activate StrongRootsScope
-                            false,   // fake perm gen collection
-                            SharedHeap::SO_AllClasses,
-                            &notOlder, // Regular roots
-                            NULL,      // do not visit active blobs
-                            &older     // Perm Gen Roots
-                            );
-  checkpointRootsInitialPost();
-  // Statistics.
-  double end = os::elapsedTime();
-  _init_times.add((end - start) * 1000.0);
-  g1p->record_concurrent_mark_init_end();
-}
 /*
  * Notice that in the next two methods, we actually leave the STS
  * during the barrier sync and join it immediately afterwards. If we
......
@@ -756,9 +756,6 @@ public:
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
-  // main CMS steps and related support
-  void checkpointRootsInitial();
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a
......
@@ -50,19 +50,6 @@ ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
   create_and_start();
 }
-class CMCheckpointRootsInitialClosure: public VoidClosure {
-  ConcurrentMark* _cm;
-public:
-  CMCheckpointRootsInitialClosure(ConcurrentMark* cm) :
-    _cm(cm) {}
-  void do_void(){
-    _cm->checkpointRootsInitial();
-  }
-};
 class CMCheckpointRootsFinalClosure: public VoidClosure {
   ConcurrentMark* _cm;
@@ -116,27 +103,6 @@ void ConcurrentMarkThread::run() {
       gclog_or_tty->print_cr("[GC concurrent-mark-start]");
     }
-    if (!g1_policy->in_young_gc_mode()) {
-      // this ensures the flag is not set if we bail out of the marking
-      // cycle; normally the flag is cleared immediately after cleanup
-      g1h->set_marking_complete();
-      if (g1_policy->adaptive_young_list_length()) {
-        double now = os::elapsedTime();
-        double init_prediction_ms = g1_policy->predict_init_time_ms();
-        jlong sleep_time_ms = mmu_tracker->when_ms(now, init_prediction_ms);
-        os::sleep(current_thread, sleep_time_ms, false);
-      }
-      // We don't have to skip here if we've been asked to restart, because
-      // in the worst case we just enqueue a new VM operation to start a
-      // marking. Note that the init operation resets has_aborted()
-      CMCheckpointRootsInitialClosure init_cl(_cm);
-      strcpy(verbose_str, "GC initial-mark");
-      VM_CGC_Operation op(&init_cl, verbose_str);
-      VMThread::execute(&op);
-    }
     int iter = 0;
     do {
       iter++;
......
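
The deleted CMCheckpointRootsInitialClosure and the block above used HotSpot's usual pattern of wrapping work in a VoidClosure and submitting it to the VM thread as a VM operation; with the non-generational path gone, the initial-mark root scan no longer needs a VM operation of its own and is only triggered from an evacuation pause (see the during_initial_mark_pause() hunk further down). A stripped-down sketch of the closure-plus-operation pattern, using hypothetical stand-in types rather than the real VMThread / VM_CGC_Operation classes:

    // Sketch of the VoidClosure + VM-operation pattern used by the deleted
    // code. All types here are stand-ins; only the shape matches HotSpot.
    #include <cstdio>

    struct VoidClosureLike {
      virtual void do_void() = 0;
      virtual ~VoidClosureLike() {}
    };

    struct ConcurrentMarkLike {
      void checkpointRootsInitial() { std::printf("scan roots at a safepoint\n"); }
    };

    struct InitialMarkClosure : VoidClosureLike {
      ConcurrentMarkLike* _cm;
      explicit InitialMarkClosure(ConcurrentMarkLike* cm) : _cm(cm) {}
      void do_void() override { _cm->checkpointRootsInitial(); }
    };

    // Stand-in for VMThread::execute(&op): run the closure "at a safepoint".
    void execute_vm_operation(VoidClosureLike* cl) { cl->do_void(); }

    int main() {
      ConcurrentMarkLike cm;
      InitialMarkClosure cl(&cm);
      execute_vm_operation(&cl);
      return 0;
    }
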
@@ -1263,10 +1263,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
-    if (g1_policy()->in_young_gc_mode()) {
     empty_young_list();
     g1_policy()->set_full_young_gcs(true);
-    }
     // See the comment in G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
@@ -1387,13 +1385,11 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
     }
-    if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
     // At this point there should be no regions in the
     // entire heap tagged as young.
     assert( check_young_list_empty(true /* check_heap */),
             "young list should be empty at this point");
-    }
     // Update the number of full collections that have been completed.
     increment_full_collections_completed(false /* concurrent */);
@@ -3161,12 +3157,6 @@ G1CollectedHeap::doConcurrentMark() {
   }
 }
-void G1CollectedHeap::do_sync_mark() {
-  _cm->checkpointRootsInitial();
-  _cm->markFromRoots();
-  _cm->checkpointRootsFinal(false);
-}
 // <NEW PREDICTION>
 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
@@ -3317,11 +3307,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
-    if (g1_policy()->in_young_gc_mode()) {
-      if (g1_policy()->full_young_gcs())
-        strcat(verbose_str, "(young)");
-      else
-        strcat(verbose_str, "(partial)");
+    if (g1_policy()->full_young_gcs()) {
+      strcat(verbose_str, "(young)");
+    } else {
+      strcat(verbose_str, "(partial)");
     }
     if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
@@ -3350,10 +3339,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       append_secondary_free_list_if_not_empty_with_lock();
     }
-    if (g1_policy()->in_young_gc_mode()) {
     assert(check_young_list_well_formed(),
            "young list should be well formed");
-    }
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
@@ -3494,27 +3481,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         // evacuation pause.
         clear_cset_fast_test();
-        if (g1_policy()->in_young_gc_mode()) {
         _young_list->reset_sampled_info();
         // Don't check the whole heap at this point as the
         // GC alloc regions from this pause have been tagged
         // as survivors and moved on to the survivor list.
         // Survivor regions will fail the !is_young() check.
         assert(check_young_list_empty(false /* check_heap */),
                "young list should be empty");
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
         _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                              _young_list->first_survivor_region(),
                                              _young_list->last_survivor_region());
         _young_list->reset_auxilary_lists();
-        }
         if (evacuation_failed()) {
           _summary_bytes_used = recalculate_used();
@@ -3524,8 +3509,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
         }
-        if (g1_policy()->in_young_gc_mode() &&
-            g1_policy()->during_initial_mark_pause()) {
+        if (g1_policy()->during_initial_mark_pause()) {
           concurrent_mark()->checkpointRootsInitialPost();
           set_marking_started();
           // CAUTION: after the doConcurrentMark() call below,
@@ -5091,7 +5075,6 @@ bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample)
 void G1CollectedHeap::empty_young_list() {
   assert(heap_lock_held_for_gc(),
          "the heap lock should already be held by or for this thread");
-  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
   _young_list->empty_list();
 }
......
@@ -1263,16 +1263,10 @@ public:
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
-  // information for young gen objects. Note that non-generational
-  // G1 does not have any "young" objects, should not elide
-  // the rs logging barrier and so should always answer false below.
-  // However, non-generational G1 (-XX:-G1Gen) appears to have
-  // bit-rotted so was not tested below.
+  // information for young gen objects.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     // Re 6920090, 6920109 above.
     assert(ReduceInitialCardMarksForG1, "Else cannot be here");
-    assert(G1Gen || !is_in_young(new_obj),
-           "Non-generational G1 should never return true below");
     return is_in_young(new_obj);
   }
@@ -1389,9 +1383,6 @@ public:
   // bitmap off to the side.
   void doConcurrentMark();
-  // Do a full concurrent marking, synchronously.
-  void do_sync_mark();
   bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;
......
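
With the non-generational caveats removed, can_elide_initializing_store_barrier reduces to the young-region check alone: newly allocated objects that land in young regions need neither SATB pre-barrier values nor remembered-set update logging, as the comment in the hunk above explains. For reference, the method as it reads after this change (reconstructed from the context lines above; oop, is_in_young() and the enclosing heap class are HotSpot declarations not repeated here):

    virtual bool can_elide_initializing_store_barrier(oop new_obj) {
      // Re 6920090, 6920109 above.
      assert(ReduceInitialCardMarksForG1, "Else cannot be here");
      return is_in_young(new_obj);
    }
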
@@ -170,7 +170,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _cur_aux_times_ms(new double[_aux_num]),
   _cur_aux_times_set(new bool[_aux_num]),
-  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -201,7 +200,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // </NEW PREDICTION>
-  _in_young_gc_mode(false),
   _full_young_gcs(true),
   _full_young_pause_num(0),
   _partial_young_pause_num(0),
@@ -400,7 +398,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _sigma = (double) G1ConfidencePercent / 100.0;
   // start conservatively (around 50ms is about right)
-  _concurrent_mark_init_times_ms->add(0.05);
   _concurrent_mark_remark_times_ms->add(0.05);
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;
@@ -468,27 +465,20 @@ void G1CollectorPolicy::init() {
   initialize_gc_policy_counters();
-  if (G1Gen) {
-    _in_young_gc_mode = true;
-    G1YoungGenSizer sizer;
-    size_t initial_region_num = sizer.initial_young_region_num();
-    if (UseAdaptiveSizePolicy) {
-      set_adaptive_young_list_length(true);
-      _young_list_fixed_length = 0;
-    } else {
-      set_adaptive_young_list_length(false);
-      _young_list_fixed_length = initial_region_num;
-    }
-    _free_regions_at_end_of_collection = _g1->free_regions();
-    calculate_young_list_min_length();
-    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
-    calculate_young_list_target_length();
-  } else {
-    _young_list_fixed_length = 0;
-    _in_young_gc_mode = false;
-  }
+  G1YoungGenSizer sizer;
+  size_t initial_region_num = sizer.initial_young_region_num();
+  if (UseAdaptiveSizePolicy) {
+    set_adaptive_young_list_length(true);
+    _young_list_fixed_length = 0;
+  } else {
+    set_adaptive_young_list_length(false);
+    _young_list_fixed_length = initial_region_num;
+  }
+  _free_regions_at_end_of_collection = _g1->free_regions();
+  calculate_young_list_min_length();
+  guarantee( _young_list_min_length == 0, "invariant, not enough info" );
+  calculate_young_list_target_length();
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
@@ -498,7 +488,7 @@ void G1CollectorPolicy::init() {
 // Create the jstat counters for the policy.
 void G1CollectorPolicy::initialize_gc_policy_counters()
 {
-  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
+  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 }
 void G1CollectorPolicy::calculate_young_list_min_length() {
@@ -868,8 +858,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   if (PrintGCDetails) {
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print("[GC pause");
-    if (in_young_gc_mode())
-      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
+    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
   }
   assert(_g1->used() == _g1->recalculate_used(),
@@ -921,8 +910,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _satb_drain_time_set = false;
   _last_satb_drain_processed_buffers = -1;
-  if (in_young_gc_mode())
-    _last_young_gc_full = false;
+  _last_young_gc_full = false;
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
@@ -935,12 +923,7 @@ void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
   _mark_closure_time_ms = mark_closure_time_ms;
 }
-void G1CollectorPolicy::record_concurrent_mark_init_start() {
-  _mark_init_start_sec = os::elapsedTime();
-  guarantee(!in_young_gc_mode(), "should not do be here in young GC mode");
-}
-void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
+void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                    mark_init_elapsed_time_ms) {
   _during_marking = true;
   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
@@ -948,15 +931,6 @@ void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
-void G1CollectorPolicy::record_concurrent_mark_init_end() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
-  _concurrent_mark_init_times_ms->add(elapsed_time_ms);
-  record_concurrent_mark_init_end_pre(elapsed_time_ms);
-  _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
-}
 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
   _mark_remark_start_sec = os::elapsedTime();
   _during_marking = false;
@@ -1019,13 +993,11 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  if (in_young_gc_mode()) {
   _should_revert_to_full_young_gcs = false;
   _last_full_young_gc = true;
   _in_marking_window = false;
   if (adaptive_young_list_length())
     calculate_young_list_target_length();
-  }
 }
 void G1CollectorPolicy::record_concurrent_pause() {
@@ -1174,31 +1146,29 @@ void G1CollectorPolicy::record_collection_pause_end() {
   }
 #endif // PRODUCT
-  if (in_young_gc_mode()) {
   last_pause_included_initial_mark = during_initial_mark_pause();
   if (last_pause_included_initial_mark)
-    record_concurrent_mark_init_end_pre(0.0);
+    record_concurrent_mark_init_end(0.0);
   size_t min_used_targ =
     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
     assert(!last_pause_included_initial_mark, "invariant");
     if (cur_used_bytes > min_used_targ &&
         cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
       assert(!during_initial_mark_pause(), "we should not see this here");
       // Note: this might have already been set, if during the last
       // pause we decided to start a cycle but at the beginning of
       // this pause we decided to postpone it. That's OK.
       set_initiate_conc_mark_if_possible();
     }
   }
   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
-  }
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
@@ -1468,24 +1438,23 @@ void G1CollectorPolicy::record_collection_pause_end() {
     new_in_marking_window_im = true;
   }
-  if (in_young_gc_mode()) {
   if (_last_full_young_gc) {
     set_full_young_gcs(false);
     _last_full_young_gc = false;
   }
   if ( !_last_young_gc_full ) {
     if ( _should_revert_to_full_young_gcs ||
          _known_garbage_ratio < 0.05 ||
          (adaptive_young_list_length() &&
          (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
       set_full_young_gcs(true);
     }
   }
   _should_revert_to_full_young_gcs = false;
-    if (_last_young_gc_full && !_during_marking)
-      _young_gc_eff_seq->add(cur_efficiency);
-  }
+  if (_last_young_gc_full && !_during_marking) {
+    _young_gc_eff_seq->add(cur_efficiency);
+  }
   _short_lived_surv_rate_group->start_adding_regions();
@@ -1910,18 +1879,8 @@ void G1CollectorPolicy::check_if_region_is_too_expensive(double
   // I don't think we need to do this when in young GC mode since
   // marking will be initiated next time we hit the soft limit anyway...
   if (predicted_time_ms > _expensive_region_limit_ms) {
-    if (!in_young_gc_mode()) {
-        set_full_young_gcs(true);
-        // We might want to do something different here. However,
-        // right now we don't support the non-generational G1 mode
-        // (and in fact we are planning to remove the associated code,
-        // see CR 6814390). So, let's leave it as is and this will be
-        // removed some time in the future
-        ShouldNotReachHere();
-        set_during_initial_mark_pause();
-    } else
-      // no point in doing another partial one
-      _should_revert_to_full_young_gcs = true;
+    // no point in doing another partial one
+    _should_revert_to_full_young_gcs = true;
   }
 }
@@ -2617,9 +2576,7 @@ void G1CollectorPolicy::start_incremental_cset_building() {
   _inc_cset_size = 0;
   _inc_cset_bytes_used_before = 0;
-  if (in_young_gc_mode()) {
-    _inc_cset_young_index = 0;
-  }
+  _inc_cset_young_index = 0;
   _inc_cset_max_finger = 0;
   _inc_cset_recorded_young_bytes = 0;
@@ -2848,86 +2805,77 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
   max_live_bytes = max_live_bytes + expansion_bytes;
   HeapRegion* hr;
-  if (in_young_gc_mode()) {
   double young_start_time_sec = os::elapsedTime();
   if (G1PolicyVerbose > 0) {
     gclog_or_tty->print_cr("Adding %d young regions to the CSet",
                            _g1->young_list()->length());
   }
   _young_cset_length = 0;
   _last_young_gc_full = full_young_gcs() ? true : false;
   if (_last_young_gc_full)
     ++_full_young_pause_num;
   else
     ++_partial_young_pause_num;
   // The young list is laid with the survivor regions from the previous
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
   hr = _g1->young_list()->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
     hr->set_young();
     hr = hr->get_next_young_region();
   }
   // Clear the fields that point to the survivor list - they are
   // all young now.
   _g1->young_list()->clear_survivors();
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
   _young_cset_length = _inc_cset_young_index;
   _collection_set = _inc_cset_head;
   _collection_set_size = _inc_cset_size;
   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
   // For young regions in the collection set, we assume the worst
   // case of complete survival
   max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
   // The number of recorded young regions is the incremental
   // collection set's current size
   set_recorded_young_regions(_inc_cset_size);
   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
   set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
 #if PREDICTIONS_VERBOSE
   set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
 #endif // PREDICTIONS_VERBOSE
   if (G1PolicyVerbose > 0) {
     gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
                            _inc_cset_size);
     gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
                            max_live_bytes/K);
   }
   assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
   double young_end_time_sec = os::elapsedTime();
   _recorded_young_cset_choice_time_ms =
     (young_end_time_sec - young_start_time_sec) * 1000.0;
   // We are doing young collections so reset this.
   non_young_start_time_sec = young_end_time_sec;
-    // Note we can use either _collection_set_size or
-    // _young_cset_length here
-    if (_collection_set_size > 0 && _last_young_gc_full) {
-      // don't bother adding more regions...
-      goto choose_collection_set_end;
-    }
-  }
-  if (!in_young_gc_mode() || !full_young_gcs()) {
+  if (!full_young_gcs()) {
     bool should_continue = true;
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
@@ -2960,7 +2908,6 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
     _should_revert_to_full_young_gcs = true;
   }
-choose_collection_set_end:
   stop_incremental_cset_building();
   count_CS_bytes_used();
......
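
One easy-to-miss hunk above is the jstat counter constructor, where "2 + G1Gen" becomes the literal 3. G1Gen was a bool develop flag defaulting to true (it is deleted in the last hunk below), so the old expression already evaluated to 3 in the generational configuration and the value passed to GCPolicyCounters is unchanged. A tiny sketch of the arithmetic, with G1Gen as a plain local standing in for the removed flag:

    // Sketch only: bool participates in integer arithmetic as 0 or 1, so with
    // the flag at its default (true) the old expression equals the new literal.
    #include <cassert>

    int main() {
      bool G1Gen = true;        // stands in for the develop flag removed here
      assert(2 + G1Gen == 3);   // old "2 + G1Gen" == new "3"
      return 0;
    }
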
@@ -141,7 +141,6 @@ protected:
   TruncatedSeq* _recent_rs_sizes;
-  TruncatedSeq* _concurrent_mark_init_times_ms;
   TruncatedSeq* _concurrent_mark_remark_times_ms;
   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
@@ -178,9 +177,6 @@ protected:
   double* _par_last_gc_worker_end_times_ms;
   double* _par_last_gc_worker_times_ms;
-  // indicates that we are in young GC mode
-  bool _in_young_gc_mode;
   // indicates whether we are in full young or partially young GC mode
   bool _full_young_gcs;
@@ -527,10 +523,6 @@ public:
     return _mmu_tracker->max_gc_time() * 1000.0;
   }
-  double predict_init_time_ms() {
-    return get_new_prediction(_concurrent_mark_init_times_ms);
-  }
   double predict_remark_time_ms() {
     return get_new_prediction(_concurrent_mark_remark_times_ms);
   }
@@ -776,7 +768,6 @@ protected:
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
   double _cur_mark_stop_world_time_ms;
-  double _mark_init_start_sec;
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
   double _mark_closure_time_ms;
@@ -849,9 +840,7 @@ public:
                                            size_t start_used);
   // Must currently be called while the world is stopped.
-  virtual void record_concurrent_mark_init_start();
-  virtual void record_concurrent_mark_init_end();
-  void record_concurrent_mark_init_end_pre(double
+  void record_concurrent_mark_init_end(double
                                             mark_init_elapsed_time_ms);
   void record_mark_closure_time(double mark_closure_time_ms);
@@ -1118,13 +1107,6 @@ public:
   void update_region_num(bool young);
-  bool in_young_gc_mode() {
-    return _in_young_gc_mode;
-  }
-  void set_in_young_gc_mode(bool in_young_gc_mode) {
-    _in_young_gc_mode = in_young_gc_mode;
-  }
   bool full_young_gcs() {
     return _full_young_gcs;
   }
......
@@ -39,8 +39,6 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  develop(bool, G1Gen, true,                                                \
-          "If true, it will enable the generational G1")                    \
                                                                             \
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
......