Commit 0a638f8d authored by apetrusenko

6484959: G1: introduce survivor spaces

6797754: G1: combined bugfix
Summary: Implemented a policy to control G1 survivor space parameters.
Reviewed-by: tonyp, iveresov
Parent a4e30aa0
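In outline, the survivor policy added by this change works as follows: each GC worker records the size of every object it copies into a survivor region in a per-thread ageTable; those tables are merged into the policy's _survivors_age_table during the pause, survivor regions stay on the young list and are recorded via record_survivor_regions(), and the next young-list sizing pass derives the survivor limits from this data. A condensed paraphrase of the new calculate_survivors_policy() (see the g1CollectorPolicy.cpp hunks below; not verbatim source):

// Condensed paraphrase of the policy introduced below, not verbatim source.
// if (G1FixedSurvivorSpaceSize == 0)
//   _max_survivor_regions = _young_list_target_length / SurvivorRatio;
// else
//   _max_survivor_regions = <fixed count derived from G1FixedSurvivorSpaceSize>;
// _tenuring_threshold = G1FixedTenuringThreshold
//     ? MaxTenuringThreshold
//     : _survivors_age_table.compute_tenuring_threshold(
//           HeapRegion::GrainWords * _max_survivor_regions);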
......@@ -141,7 +141,7 @@ YoungList::YoungList(G1CollectedHeap* g1h)
_scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
_length(0), _scan_only_length(0),
_last_sampled_rs_lengths(0),
_survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
guarantee( check_list_empty(false), "just making sure..." );
}
......@@ -159,16 +159,15 @@ void YoungList::push_region(HeapRegion *hr) {
}
void YoungList::add_survivor_region(HeapRegion* hr) {
assert(!hr->is_survivor(), "should not already be for survived");
assert(hr->is_survivor(), "should be flagged as survivor region");
assert(hr->get_next_young_region() == NULL, "cause it should!");
hr->set_next_young_region(_survivor_head);
if (_survivor_head == NULL) {
_survivors_tail = hr;
_survivor_tail = hr;
}
_survivor_head = hr;
hr->set_survivor();
++_survivor_length;
}
......@@ -239,7 +238,7 @@ void YoungList::empty_list() {
empty_list(_survivor_head);
_survivor_head = NULL;
_survivors_tail = NULL;
_survivor_tail = NULL;
_survivor_length = 0;
_last_sampled_rs_lengths = 0;
......@@ -391,6 +390,7 @@ YoungList::reset_auxilary_lists() {
// Add survivor regions to SurvRateGroup.
_g1h->g1_policy()->note_start_adding_survivor_regions();
_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
for (HeapRegion* curr = _survivor_head;
curr != NULL;
curr = curr->get_next_young_region()) {
......@@ -401,7 +401,7 @@ YoungList::reset_auxilary_lists() {
if (_survivor_head != NULL) {
_head = _survivor_head;
_length = _survivor_length + _scan_only_length;
_survivors_tail->set_next_young_region(_scan_only_head);
_survivor_tail->set_next_young_region(_scan_only_head);
} else {
_head = _scan_only_head;
_length = _scan_only_length;
......@@ -418,9 +418,9 @@ YoungList::reset_auxilary_lists() {
_curr_scan_only = NULL;
_survivor_head = NULL;
_survivors_tail = NULL;
_survivor_tail = NULL;
_survivor_length = 0;
_g1h->g1_policy()->finished_recalculating_age_indexes();
_g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
assert(check_list_well_formed(), "young list should be well formed");
}
......@@ -553,7 +553,7 @@ HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
alloc_region = newAllocRegion_work(word_size, true, zero_filled);
if (purpose == GCAllocForSurvived && alloc_region != NULL) {
_young_list->add_survivor_region(alloc_region);
alloc_region->set_survivor();
}
++_gc_alloc_region_counts[purpose];
} else {
......@@ -2593,6 +2593,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
_young_list->print();
#endif // SCAN_ONLY_VERBOSE
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
}
} else {
......@@ -2619,7 +2622,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
#endif // SCAN_ONLY_VERBOSE
double end_time_sec = os::elapsedTime();
g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
if (!evacuation_failed()) {
g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
}
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(popular_region != NULL,
abandoned);
......@@ -2754,6 +2759,13 @@ void G1CollectedHeap::forget_alloc_region_list() {
_gc_alloc_region_list = r->next_gc_alloc_region();
r->set_next_gc_alloc_region(NULL);
r->set_is_gc_alloc_region(false);
if (r->is_survivor()) {
if (r->is_empty()) {
r->set_not_young();
} else {
_young_list->add_survivor_region(r);
}
}
if (r->is_empty()) {
++_free_regions;
}
......@@ -3150,6 +3162,20 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
return block;
}
void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
bool par) {
// Another thread might have obtained alloc_region for the given
// purpose, and might be attempting to allocate in it, and might
// succeed. Therefore, we can't do the "finalization" stuff on the
// region below until we're sure the last allocation has happened.
// We ensure this by allocating the remaining space with a garbage
// object.
if (par) par_allocate_remaining_space(alloc_region);
// Now we can do the post-GC stuff on the region.
alloc_region->note_end_of_copying();
g1_policy()->record_after_bytes(alloc_region->used());
}
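For context, the "garbage object" trick that comment describes can be sketched roughly as follows. This is an illustrative paraphrase, not the actual par_allocate_remaining_space() in this file; the helper name and the retry structure are assumptions.

// Illustrative sketch only, not HotSpot's par_allocate_remaining_space().
static void fill_region_tail(HeapRegion* r) {
  // Claim whatever is left in the region through the same CAS-based
  // par_allocate() path the GC workers use; retry if a racing worker
  // shrinks the gap between our size computation and the CAS.
  size_t remaining = pointer_delta(r->end(), r->top());
  while (remaining > 0) {
    HeapWord* dummy = r->par_allocate(remaining);
    if (dummy != NULL) {
      // We own the tail of the region: format it as a dead filler
      // object so the region still parses as a run of valid objects.
      // No later allocation in this region can now succeed.
      break;
    }
    remaining = pointer_delta(r->end(), r->top());
  }
}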
HeapWord*
G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
HeapRegion* alloc_region,
......@@ -3167,16 +3193,7 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
// Otherwise, continue; this new region is empty, too.
}
assert(alloc_region != NULL, "We better have an allocation region");
// Another thread might have obtained alloc_region for the given
// purpose, and might be attempting to allocate in it, and might
// succeed. Therefore, we can't do the "finalization" stuff on the
// region below until we're sure the last allocation has happened.
// We ensure this by allocating the remaining space with a garbage
// object.
if (par) par_allocate_remaining_space(alloc_region);
// Now we can do the post-GC stuff on the region.
alloc_region->note_end_of_copying();
g1_policy()->record_after_bytes(alloc_region->used());
retire_alloc_region(alloc_region, par);
if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
// Cannot allocate more regions for the given purpose.
......@@ -3185,7 +3202,7 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
if (purpose != alt_purpose) {
HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
// Has not the alternative region been aliased?
if (alloc_region != alt_region) {
if (alloc_region != alt_region && alt_region != NULL) {
// Try to allocate in the alternative region.
if (par) {
block = alt_region->par_allocate(word_size);
......@@ -3194,9 +3211,10 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
}
// Make an alias.
_gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
}
if (block != NULL) {
return block;
if (block != NULL) {
return block;
}
retire_alloc_region(alt_region, par);
}
// Both the allocation region and the alternative one are full
// and aliased, replace them with a new allocation region.
......@@ -3497,6 +3515,7 @@ protected:
OverflowQueue* _overflowed_refs;
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
size_t _alloc_buffer_waste;
size_t _undo_waste;
......@@ -3538,6 +3557,7 @@ public:
_refs(g1h->task_queue(queue_num)),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_age_table(false),
#if G1_DETAILED_STATS
_pushes(0), _pops(0), _steals(0),
_steal_attempts(0), _overflow_pushes(0),
......@@ -3572,8 +3592,9 @@ public:
RefToScanQueue* refs() { return _refs; }
OverflowQueue* overflowed_refs() { return _overflowed_refs; }
ageTable* age_table() { return &_age_table; }
inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return &_alloc_buffers[purpose];
}
......@@ -3834,7 +3855,9 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
(!from_region->is_young() && young_index == 0), "invariant" );
G1CollectorPolicy* g1p = _g1->g1_policy();
markOop m = old->mark();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
: m->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
oop obj = oop(obj_ptr);
......@@ -3872,9 +3895,12 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
obj->incr_age();
} else {
m = m->incr_age();
obj->set_mark(m);
}
_par_scan_state->age_table()->add(obj, word_sz);
} else {
obj->set_mark(m);
}
obj->set_mark(m);
// preserve "next" mark bit
if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
......@@ -4129,6 +4155,9 @@ public:
_g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
_g1h->g1_policy()->record_termination_time(i, term_ms);
}
if (G1UseSurvivorSpace) {
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
}
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
// Clean up any par-expanded rem sets.
......@@ -4368,7 +4397,7 @@ void G1CollectedHeap::evacuate_collection_set() {
// Is this the right thing to do here? We don't save marks
// on individual heap regions when we allocate from
// them in parallel, so this seems like the correct place for this.
all_alloc_regions_note_end_of_copying();
retire_all_alloc_regions();
{
G1IsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
......@@ -5008,7 +5037,7 @@ bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
return no_allocs;
}
void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
void G1CollectedHeap::retire_all_alloc_regions() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
HeapRegion* r = _gc_alloc_regions[ap];
if (r != NULL) {
......@@ -5021,8 +5050,7 @@ void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
}
}
if (!has_processed_alias) {
r->note_end_of_copying();
g1_policy()->record_after_bytes(r->used());
retire_alloc_region(r, false /* par */);
}
}
}
......
......@@ -90,7 +90,7 @@ private:
HeapRegion* _curr_scan_only;
HeapRegion* _survivor_head;
HeapRegion* _survivors_tail;
HeapRegion* _survivor_tail;
size_t _survivor_length;
void empty_list(HeapRegion* list);
......@@ -105,6 +105,7 @@ public:
bool is_empty() { return _length == 0; }
size_t length() { return _length; }
size_t scan_only_length() { return _scan_only_length; }
size_t survivor_length() { return _survivor_length; }
void rs_length_sampling_init();
bool rs_length_sampling_more();
......@@ -120,6 +121,7 @@ public:
HeapRegion* first_region() { return _head; }
HeapRegion* first_scan_only_region() { return _scan_only_head; }
HeapRegion* first_survivor_region() { return _survivor_head; }
HeapRegion* last_survivor_region() { return _survivor_tail; }
HeapRegion* par_get_next_scan_only_region() {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
HeapRegion* ret = _curr_scan_only;
......@@ -219,7 +221,7 @@ private:
// The to-space memory regions into which objects are being copied during
// a GC.
HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
uint _gc_alloc_region_counts[GCAllocPurposeCount];
size_t _gc_alloc_region_counts[GCAllocPurposeCount];
// A list of the regions that have been set to be alloc regions in the
// current collection.
......@@ -281,8 +283,8 @@ protected:
// Returns "true" iff none of the gc alloc regions have any allocations
// since the last call to "save_marks".
bool all_alloc_regions_no_allocs_since_save_marks();
// Calls "note_end_of_copying on all gc alloc_regions.
void all_alloc_regions_note_end_of_copying();
// Perform finalization stuff on all allocation regions.
void retire_all_alloc_regions();
// The number of regions allocated to hold humongous objects.
int _num_humongous_regions;
......@@ -351,6 +353,10 @@ protected:
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
// Retires an allocation region when it is full or at the end of a
// GC pause.
void retire_alloc_region(HeapRegion* alloc_region, bool par);
// Helper function for two callbacks below.
// "full", if true, indicates that the GC is for a System.gc() request,
// and should collect the entire heap. If "clear_all_soft_refs" is true,
......
......@@ -196,8 +196,13 @@ G1CollectorPolicy::G1CollectorPolicy() :
_short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
G1YoungSurvRateNumRegionsSummary)),
_survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
G1YoungSurvRateNumRegionsSummary))
G1YoungSurvRateNumRegionsSummary)),
// add here any more surv rate groups
_recorded_survivor_regions(0),
_recorded_survivor_head(NULL),
_recorded_survivor_tail(NULL),
_survivors_age_table(true)
{
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
......@@ -272,6 +277,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
_concurrent_mark_cleanup_times_ms->add(0.20);
_tenuring_threshold = MaxTenuringThreshold;
if (G1UseSurvivorSpace) {
// If G1FixedSurvivorSpaceSize is 0, the size is not fixed and
// _max_survivor_regions will be calculated by
// calculate_young_list_target_config during initialization
_max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
} else {
_max_survivor_regions = 0;
}
initialize_all();
}
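To make the fixed-size branch above concrete, a small worked example with assumed values (the 1 MB region size is an assumption, not stated in this diff):

// Illustrative only.
//   -XX:G1FixedSurvivorSpaceSize=32m with 1 MB regions (GrainBytes = 1 MB)
//   => _max_survivor_regions = 32 MB / 1 MB = 32 regions
// With the default value of 0, this initializes to 0 and the adaptive path
// in calculate_survivors_policy() (later in this file) recomputes the limit
// as part of calculate_young_list_target_config().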
......@@ -301,6 +315,8 @@ void G1CollectorPolicy::init() {
"-XX:+UseConcMarkSweepGC.");
}
initialize_gc_policy_counters();
if (G1Gen) {
_in_young_gc_mode = true;
......@@ -322,6 +338,12 @@ void G1CollectorPolicy::init() {
}
}
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters()
{
_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
}
void G1CollectorPolicy::calculate_young_list_min_length() {
_young_list_min_length = 0;
......@@ -352,6 +374,7 @@ void G1CollectorPolicy::calculate_young_list_target_config() {
guarantee( so_length < _young_list_target_length, "invariant" );
_young_list_so_prefix_length = so_length;
}
calculate_survivors_policy();
}
// This method calculate the optimal scan-only set for a fixed young
......@@ -448,6 +471,9 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
// we are in fully-young mode and there are free regions in the heap
double survivor_regions_evac_time =
predict_survivor_regions_evac_time();
size_t min_so_length = 0;
size_t max_so_length = 0;
......@@ -497,9 +523,8 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
scanned_cards = predict_non_young_card_num(adj_rs_lengths);
// calculate this once, so that we don't have to recalculate it in
// the innermost loop
double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
scanned_cards);
double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
+ survivor_regions_evac_time;
// the result
size_t final_young_length = 0;
size_t final_so_length = 0;
......@@ -548,14 +573,14 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
bool done = false;
// this is the outermost loop
while (!done) {
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
", incr " SIZE_FORMAT ", pass %s",
from_so_length, to_so_length, so_length_incr,
(pass == pass_type_coarse) ? "coarse" :
(pass == pass_type_fine) ? "fine" : "final");
#endif // 0
#endif // TRACE_CALC_YOUNG_CONFIG
size_t so_length = from_so_length;
size_t init_free_regions =
......@@ -651,11 +676,11 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
guarantee( so_length_incr == so_coarse_increments, "invariant" );
guarantee( final_so_length >= min_so_length, "invariant" );
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr(" coarse pass: SO length " SIZE_FORMAT,
final_so_length);
#endif // 0
#endif // TRACE_CALC_YOUNG_CONFIG
from_so_length =
(final_so_length - min_so_length > so_coarse_increments) ?
......@@ -687,12 +712,12 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
// of the optimal
size_t new_so_length = 950 * final_so_length / 1000;
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr(" fine pass: SO length " SIZE_FORMAT
", setting it to " SIZE_FORMAT,
final_so_length, new_so_length);
#endif // 0
#endif // TRACE_CALC_YOUNG_CONFIG
from_so_length = new_so_length;
to_so_length = new_so_length;
......@@ -719,7 +744,8 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
}
// we should have at least one region in the target young length
_young_list_target_length = MAX2((size_t) 1, final_young_length);
_young_list_target_length =
MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
if (final_so_length >= final_young_length)
// and we need to ensure that the S-O length is not greater than
// the target young length (this is being a bit careful)
......@@ -734,7 +760,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
", SO = " SIZE_FORMAT ", "
......@@ -747,9 +773,9 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
calculations,
full_young_gcs() ? "full" : "partial",
should_initiate_conc_mark() ? " i-m" : "",
in_marking_window(),
in_marking_window_im());
#endif // 0
_in_marking_window,
_in_marking_window_im);
#endif // TRACE_CALC_YOUNG_CONFIG
if (_young_list_target_length < _young_list_min_length) {
// bummer; this means that, if we do a pause when the optimal
......@@ -768,14 +794,14 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
// S-O length
so_length = calculate_optimal_so_length(_young_list_min_length);
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr("adjusted target length from "
SIZE_FORMAT " to " SIZE_FORMAT
", SO " SIZE_FORMAT,
_young_list_target_length, _young_list_min_length,
so_length);
#endif // 0
#endif // TRACE_CALC_YOUNG_CONFIG
_young_list_target_length =
MAX2(_young_list_min_length, (size_t)1);
......@@ -785,12 +811,12 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
// we are in a partially-young mode or we've run out of regions (due
// to evacuation failure)
#if 0
#ifdef TRACE_CALC_YOUNG_CONFIG
// leave this in for debugging, just in case
gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
", SO " SIZE_FORMAT,
_young_list_min_length, 0);
#endif // 0
#endif // TRACE_CALC_YOUNG_CONFIG
// we'll do the pause as soon as possible and with no S-O prefix
// (see above for the reasons behind the latter)
......@@ -884,6 +910,16 @@ G1CollectorPolicy::predict_gc_eff(size_t young_length,
return true;
}
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
double survivor_regions_evac_time = 0.0;
for (HeapRegion * r = _recorded_survivor_head;
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
}
return survivor_regions_evac_time;
}
void G1CollectorPolicy::check_prediction_validity() {
guarantee( adaptive_young_list_length(), "should not call this otherwise" );
......@@ -995,11 +1031,15 @@ void G1CollectorPolicy::record_full_collection_end() {
_short_lived_surv_rate_group->start_adding_regions();
// also call this on any additional surv rate groups
record_survivor_regions(0, NULL, NULL);
_prev_region_num_young = _region_num_young;
_prev_region_num_tenured = _region_num_tenured;
_free_regions_at_end_of_collection = _g1->free_regions();
_scan_only_regions_at_end_of_collection = 0;
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
calculate_young_list_min_length();
calculate_young_list_target_config();
}
......@@ -1104,6 +1144,10 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
tag_scan_only(short_lived_so_length);
if (G1UseSurvivorSpace) {
_survivors_age_table.clear();
}
assert( verify_young_ages(), "region age verification" );
}
......@@ -1965,9 +2009,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,
// </NEW PREDICTION>
_target_pause_time_ms = -1.0;
// TODO: calculate tenuring threshold
_tenuring_threshold = MaxTenuringThreshold;
}
// <NEW PREDICTION>
......@@ -2058,7 +2099,7 @@ G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
"invariant" );
int age = hr->age_in_surv_rate_group();
double yg_surv_rate = predict_yg_surv_rate(age);
double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
}
......@@ -2091,7 +2132,7 @@ G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
}
#if PREDICTIONS_VERBOSE
if (young) {
_recorded_young_bytes += hr->asSpace()->used();
_recorded_young_bytes += hr->used();
} else {
_recorded_marked_bytes += hr->max_live_bytes();
}
......@@ -2119,11 +2160,6 @@ G1CollectorPolicy::end_recording_regions() {
predict_non_young_card_num(_predicted_rs_lengths);
_recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
_predicted_young_survival_ratio = 0.0;
for (int i = 0; i < _recorded_young_regions; ++i)
_predicted_young_survival_ratio += predict_yg_surv_rate(i);
_predicted_young_survival_ratio /= (double) _recorded_young_regions;
_predicted_scan_only_scan_time_ms =
predict_scan_only_time_ms(_recorded_scan_only_regions);
_predicted_rs_update_time_ms =
......@@ -2673,8 +2709,11 @@ G1CollectorPolicy::should_add_next_region_to_young_list() {
assert(in_young_gc_mode(), "should be in young GC mode");
bool ret;
size_t young_list_length = _g1->young_list_length();
if (young_list_length < _young_list_target_length) {
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
if (young_list_length < young_list_max_length) {
ret = true;
++_region_num_young;
} else {
......@@ -2710,17 +2749,39 @@ G1CollectorPolicy::checkpoint_conc_overhead() {
}
uint G1CollectorPolicy::max_regions(int purpose) {
size_t G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:
return G1MaxSurvivorRegions;
return _max_survivor_regions;
case GCAllocForTenured:
return UINT_MAX;
return REGIONS_UNLIMITED;
default:
return UINT_MAX;
ShouldNotReachHere();
return REGIONS_UNLIMITED;
};
}
// Calculates survivor space parameters.
void G1CollectorPolicy::calculate_survivors_policy()
{
if (!G1UseSurvivorSpace) {
return;
}
if (G1FixedSurvivorSpaceSize == 0) {
_max_survivor_regions = _young_list_target_length / SurvivorRatio;
} else {
_max_survivor_regions = G1FixedSurvivorSpaceSize;
}
if (G1FixedTenuringThreshold) {
_tenuring_threshold = MaxTenuringThreshold;
} else {
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions);
}
}
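A worked example of the adaptive branch above, with purely illustrative numbers:

// Illustrative numbers only.
//   _young_list_target_length = 80 regions, default SurvivorRatio = 8
//   => _max_survivor_regions = 80 / 8 = 10 regions
// Assuming 1 MB regions on a 64-bit build, HeapRegion::GrainWords = 131072,
// so compute_tenuring_threshold() is handed a survivor capacity of
//   131072 * 10 = 1310720 words.
// The returned threshold is the smallest age at which the cumulative
// per-age volume recorded in _survivors_age_table exceeds
// TargetSurvivorRatio percent of that capacity (50 percent by default).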
void
G1CollectorPolicy_BestRegionsFirst::
set_single_region_collection_set(HeapRegion* hr) {
......@@ -2743,7 +2804,11 @@ G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
size_t young_list_length = _g1->young_list_length();
bool reached_target_length = young_list_length >= _young_list_target_length;
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
bool reached_target_length = young_list_length >= young_list_max_length;
if (in_young_gc_mode()) {
if (reached_target_length) {
......
......@@ -557,6 +557,8 @@ public:
return get_new_neg_prediction(_young_gc_eff_seq);
}
double predict_survivor_regions_evac_time();
// </NEW PREDICTION>
public:
......@@ -599,8 +601,8 @@ public:
// Returns an estimate of the survival rate of the region at yg-age
// "yg_age".
double predict_yg_surv_rate(int age) {
TruncatedSeq* seq = _short_lived_surv_rate_group->get_seq(age);
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
TruncatedSeq* seq = surv_rate_group->get_seq(age);
if (seq->num() == 0)
gclog_or_tty->print("BARF! age is %d", age);
guarantee( seq->num() > 0, "invariant" );
......@@ -610,6 +612,10 @@ public:
return pred;
}
double predict_yg_surv_rate(int age) {
return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}
double accum_yg_surv_rate_pred(int age) {
return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}
......@@ -822,6 +828,9 @@ public:
virtual void init();
// Create jstat counters for the policy.
virtual void initialize_gc_policy_counters();
virtual HeapWord* mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
......@@ -1047,8 +1056,12 @@ public:
// Print stats on young survival ratio
void print_yg_surv_rate_info() const;
void finished_recalculating_age_indexes() {
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
void finished_recalculating_age_indexes(bool is_survivors) {
if (is_survivors) {
_survivor_surv_rate_group->finished_recalculating_age_indexes();
} else {
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
}
// do that for any other surv rate groups
}
......@@ -1097,6 +1110,17 @@ protected:
// maximum number of survivor regions.
int _tenuring_threshold;
// The limit on the number of regions allocated for survivors.
size_t _max_survivor_regions;
// The number of survivor regions after a collection.
size_t _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
ageTable _survivors_age_table;
public:
inline GCAllocPurpose
......@@ -1116,7 +1140,9 @@ public:
return GCAllocForTenured;
}
uint max_regions(int purpose);
static const size_t REGIONS_UNLIMITED = ~(size_t)0;
size_t max_regions(int purpose);
// The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) {
......@@ -1132,6 +1158,23 @@ public:
void note_stop_adding_survivor_regions() {
_survivor_surv_rate_group->stop_adding_regions();
}
void record_survivor_regions(size_t regions,
HeapRegion* head,
HeapRegion* tail) {
_recorded_survivor_regions = regions;
_recorded_survivor_head = head;
_recorded_survivor_tail = tail;
}
void record_thread_age_table(ageTable* age_table)
{
_survivors_age_table.merge_par(age_table);
}
// Calculates survivor space parameters.
void calculate_survivors_policy();
};
// This encapsulates a particular strategy for a g1 Collector.
......
......@@ -572,6 +572,9 @@ prepare_for_oops_into_collection_set_do() {
}
guarantee( _cards_scanned == NULL, "invariant" );
_cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
for (uint i = 0; i < n_workers(); ++i) {
_cards_scanned[i] = 0;
}
_total_cards_scanned = 0;
}
......
......@@ -281,7 +281,17 @@
develop(bool, G1HRRSFlushLogBuffersOnVerify, false, \
"Forces flushing of log buffers before verification.") \
\
product(intx, G1MaxSurvivorRegions, 0, \
"The maximum number of survivor regions")
product(bool, G1UseSurvivorSpace, true, \
"When true, use survivor space.") \
\
product(bool, G1FixedTenuringThreshold, false, \
"When set, G1 will not adjust the tenuring threshold") \
\
product(bool, G1FixedEdenSize, false, \
"When set, G1 will not allocate unused survivor space regions") \
\
product(uintx, G1FixedSurvivorSpaceSize, 0, \
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
......@@ -566,7 +566,11 @@ class HeapRegion: public G1OffsetTableContigSpace {
void note_end_of_copying() {
assert(top() >= _next_top_at_mark_start,
"Increase only");
_next_top_at_mark_start = top();
// Survivor regions will be scanned at the start of concurrent
// marking.
if (!is_survivor()) {
_next_top_at_mark_start = top();
}
}
// Returns "false" iff no object in the region was allocated when the
......
......@@ -65,9 +65,11 @@ protected:
// We need access in order to union things into the base table.
BitMap* bm() { return &_bm; }
#if PRT_COUNT_OCCUPIED
void recount_occupied() {
_occupied = (jint) bm()->count_one_bits();
}
#endif
PerRegionTable(HeapRegion* hr) :
_hr(hr),
......@@ -1144,7 +1146,9 @@ void HeapRegionRemSet::clear_outgoing_entries() {
size_t i = _outgoing_region_map.get_next_one_offset(0);
while (i < _outgoing_region_map.size()) {
HeapRegion* to_region = g1h->region_at(i);
to_region->rem_set()->clear_incoming_entry(hr());
if (!to_region->in_collection_set()) {
to_region->rem_set()->clear_incoming_entry(hr());
}
i = _outgoing_region_map.get_next_one_offset(i+1);
}
}
......
......@@ -29,23 +29,14 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
const char* name,
size_t summary_surv_rates_len) :
_g1p(g1p), _name(name),
_all_regions_allocated(0),
_curr_length(0), _scan_only_prefix(0), _setup_seq_num(0),
_array_length(0), _surv_rate(NULL), _accum_surv_rate_pred(NULL),
_accum_surv_rate(0.0), _surv_rate_pred(NULL), _last_pred(0.0),
_summary_surv_rates_len(summary_surv_rates_len),
_summary_surv_rates_max_len(0),
_summary_surv_rates(NULL) {
// the following will set up the arrays with length 1
_curr_length = 1;
stop_adding_regions();
guarantee( _array_length == 1, "invariant" );
guarantee( _surv_rate_pred[0] != NULL, "invariant" );
_surv_rate_pred[0]->add(0.4);
all_surviving_words_recorded(false);
_curr_length = 0;
_summary_surv_rates(NULL),
_surv_rate(NULL),
_accum_surv_rate_pred(NULL),
_surv_rate_pred(NULL)
{
reset();
if (summary_surv_rates_len > 0) {
size_t length = summary_surv_rates_len;
_summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
......@@ -60,61 +51,80 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
start_adding_regions();
}
void SurvRateGroup::reset()
{
_all_regions_allocated = 0;
_scan_only_prefix = 0;
_setup_seq_num = 0;
_stats_arrays_length = 0;
_accum_surv_rate = 0.0;
_last_pred = 0.0;
// the following will set up the arrays with length 1
_region_num = 1;
stop_adding_regions();
guarantee( _stats_arrays_length == 1, "invariant" );
guarantee( _surv_rate_pred[0] != NULL, "invariant" );
_surv_rate_pred[0]->add(0.4);
all_surviving_words_recorded(false);
_region_num = 0;
}
void
SurvRateGroup::start_adding_regions() {
_setup_seq_num = _array_length;
_curr_length = _scan_only_prefix;
_setup_seq_num = _stats_arrays_length;
_region_num = _scan_only_prefix;
_accum_surv_rate = 0.0;
#if 0
gclog_or_tty->print_cr("start adding regions, seq num %d, length %d",
_setup_seq_num, _curr_length);
gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d",
_name, _setup_seq_num, _region_num);
#endif // 0
}
void
SurvRateGroup::stop_adding_regions() {
size_t length = _curr_length;
#if 0
gclog_or_tty->print_cr("stop adding regions, length %d", length);
gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num);
#endif // 0
if (length > _array_length) {
if (_region_num > _stats_arrays_length) {
double* old_surv_rate = _surv_rate;
double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
_surv_rate = NEW_C_HEAP_ARRAY(double, length);
_surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
if (_surv_rate == NULL) {
vm_exit_out_of_memory(sizeof(double) * length,
vm_exit_out_of_memory(sizeof(double) * _region_num,
"Not enough space for surv rate array.");
}
_accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, length);
_accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
if (_accum_surv_rate_pred == NULL) {
vm_exit_out_of_memory(sizeof(double) * length,
vm_exit_out_of_memory(sizeof(double) * _region_num,
"Not enough space for accum surv rate pred array.");
}
_surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, length);
_surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
if (_surv_rate == NULL) {
vm_exit_out_of_memory(sizeof(TruncatedSeq*) * length,
vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num,
"Not enough space for surv rate pred array.");
}
for (size_t i = 0; i < _array_length; ++i)
for (size_t i = 0; i < _stats_arrays_length; ++i)
_surv_rate_pred[i] = old_surv_rate_pred[i];
#if 0
gclog_or_tty->print_cr("stop adding regions, new seqs %d to %d",
_array_length, length - 1);
gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d",
_name, _array_length, _region_num - 1);
#endif // 0
for (size_t i = _array_length; i < length; ++i) {
for (size_t i = _stats_arrays_length; i < _region_num; ++i) {
_surv_rate_pred[i] = new TruncatedSeq(10);
// _surv_rate_pred[i]->add(last_pred);
}
_array_length = length;
_stats_arrays_length = _region_num;
if (old_surv_rate != NULL)
FREE_C_HEAP_ARRAY(double, old_surv_rate);
......@@ -124,7 +134,7 @@ SurvRateGroup::stop_adding_regions() {
FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred);
}
for (size_t i = 0; i < _array_length; ++i)
for (size_t i = 0; i < _stats_arrays_length; ++i)
_surv_rate[i] = 0.0;
}
......@@ -135,7 +145,7 @@ SurvRateGroup::accum_surv_rate(size_t adjustment) {
double ret = _accum_surv_rate;
if (adjustment > 0) {
TruncatedSeq* seq = get_seq(_curr_length+1);
TruncatedSeq* seq = get_seq(_region_num+1);
double surv_rate = _g1p->get_new_prediction(seq);
ret += surv_rate;
}
......@@ -145,23 +155,23 @@ SurvRateGroup::accum_surv_rate(size_t adjustment) {
int
SurvRateGroup::next_age_index() {
TruncatedSeq* seq = get_seq(_curr_length);
TruncatedSeq* seq = get_seq(_region_num);
double surv_rate = _g1p->get_new_prediction(seq);
_accum_surv_rate += surv_rate;
++_curr_length;
++_region_num;
return (int) ++_all_regions_allocated;
}
void
SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
guarantee( scan_only_prefix <= _curr_length, "pre-condition" );
guarantee( scan_only_prefix <= _region_num, "pre-condition" );
_scan_only_prefix = scan_only_prefix;
}
void
SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
guarantee( 0 <= age_in_group && (size_t) age_in_group < _curr_length,
guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
"pre-condition" );
guarantee( _surv_rate[age_in_group] <= 0.00001,
"should only update each slot once" );
......@@ -178,15 +188,15 @@ SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
void
SurvRateGroup::all_surviving_words_recorded(bool propagate) {
if (propagate && _curr_length > 0) { // conservative
double surv_rate = _surv_rate_pred[_curr_length-1]->last();
if (propagate && _region_num > 0) { // conservative
double surv_rate = _surv_rate_pred[_region_num-1]->last();
#if 0
gclog_or_tty->print_cr("propagating %1.2lf from %d to %d",
surv_rate, _curr_length, _array_length - 1);
#endif // 0
for (size_t i = _curr_length; i < _array_length; ++i) {
for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
guarantee( _surv_rate[i] <= 0.00001,
"the slot should not have been updated" );
_surv_rate_pred[i]->add(surv_rate);
......@@ -195,7 +205,7 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
double accum = 0.0;
double pred = 0.0;
for (size_t i = 0; i < _array_length; ++i) {
for (size_t i = 0; i < _stats_arrays_length; ++i) {
pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
if (pred > 1.0) pred = 1.0;
accum += pred;
......@@ -209,8 +219,8 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
void
SurvRateGroup::print() {
gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
_name, _curr_length, _scan_only_prefix);
for (size_t i = 0; i < _curr_length; ++i) {
_name, _region_num, _scan_only_prefix);
for (size_t i = 0; i < _region_num; ++i) {
gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s",
i, _surv_rate[i] * 100.0,
_g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
......
......@@ -29,7 +29,7 @@ private:
G1CollectorPolicy* _g1p;
const char* _name;
size_t _array_length;
size_t _stats_arrays_length;
double* _surv_rate;
double* _accum_surv_rate_pred;
double _last_pred;
......@@ -40,7 +40,7 @@ private:
size_t _summary_surv_rates_max_len;
int _all_regions_allocated;
size_t _curr_length;
size_t _region_num;
size_t _scan_only_prefix;
size_t _setup_seq_num;
......@@ -48,6 +48,7 @@ public:
SurvRateGroup(G1CollectorPolicy* g1p,
const char* name,
size_t summary_surv_rates_len);
void reset();
void start_adding_regions();
void stop_adding_regions();
void record_scan_only_prefix(size_t scan_only_prefix);
......@@ -55,22 +56,21 @@ public:
void all_surviving_words_recorded(bool propagate);
const char* name() { return _name; }
size_t region_num() { return _curr_length; }
size_t region_num() { return _region_num; }
size_t scan_only_length() { return _scan_only_prefix; }
double accum_surv_rate_pred(int age) {
assert(age >= 0, "must be");
if ((size_t)age < _array_length)
if ((size_t)age < _stats_arrays_length)
return _accum_surv_rate_pred[age];
else {
double diff = (double) (age - _array_length + 1);
return _accum_surv_rate_pred[_array_length-1] + diff * _last_pred;
double diff = (double) (age - _stats_arrays_length + 1);
return _accum_surv_rate_pred[_stats_arrays_length-1] + diff * _last_pred;
}
}
double accum_surv_rate(size_t adjustment);
TruncatedSeq* get_seq(size_t age) {
guarantee( 0 <= age, "pre-condition" );
if (age >= _setup_seq_num) {
guarantee( _setup_seq_num > 0, "invariant" );
age = _setup_seq_num-1;
......
......@@ -172,6 +172,7 @@ g1CollectorPolicy.cpp g1CollectedHeap.inline.hpp
g1CollectorPolicy.cpp g1CollectorPolicy.hpp
g1CollectorPolicy.cpp heapRegionRemSet.hpp
g1CollectorPolicy.cpp mutexLocker.hpp
g1CollectorPolicy.cpp gcPolicyCounters.hpp
g1CollectorPolicy.hpp collectorPolicy.hpp
g1CollectorPolicy.hpp collectionSetChooser.hpp
......@@ -272,6 +273,7 @@ heapRegion.hpp g1BlockOffsetTable.inline.hpp
heapRegion.hpp watermark.hpp
heapRegion.hpp g1_specialized_oop_closures.hpp
heapRegion.hpp survRateGroup.hpp
heapRegion.hpp ageTable.hpp
heapRegionRemSet.hpp sparsePRT.hpp
......
......@@ -67,6 +67,12 @@ void ageTable::merge(ageTable* subTable) {
}
}
void ageTable::merge_par(ageTable* subTable) {
for (int i = 0; i < table_size; i++) {
Atomic::add_ptr(subTable->sizes[i], &sizes[i]);
}
}
int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
size_t total = 0;
......
......@@ -56,6 +56,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
// Merge another age table with the current one. Used
// for parallel young generation gc.
void merge(ageTable* subTable);
void merge_par(ageTable* subTable);
// calculate new tenuring threshold based on age information
int compute_tenuring_threshold(size_t survivor_capacity);
......