Commit 8c6a3df6 authored by brutisso

Merge

@@ -106,10 +106,10 @@ void VM_Version::initialize() {
   if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
     FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
   }
-  // When using CMS, we cannot use memset() in BOT updates because
-  // the sun4v/CMT version in libc_psr uses BIS which exposes
-  // "phantom zeros" to concurrent readers. See 6948537.
-  if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
+  // When using CMS or G1, we cannot use memset() in BOT updates
+  // because the sun4v/CMT version in libc_psr uses BIS which
+  // exposes "phantom zeros" to concurrent readers. See 6948537.
+  if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
     FLAG_SET_DEFAULT(UseMemSetInBOT, false);
   }
 #ifdef _LP64
......
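For context on the guard in the hunk above: the new default is only applied when the user has not set UseMemSetInBOT explicitly, so an explicit -XX:+UseMemSetInBOT still wins. Below is a minimal standalone sketch of that "only override an untouched default" pattern; it is not HotSpot code, and the Flag struct and set_default() helper are simplified stand-ins for the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT macros.

// Standalone sketch (not HotSpot code) of the pattern used in
// VM_Version::initialize() above: override a flag's default only if the
// user has not touched it on the command line.
#include <cstdio>

struct Flag {
  bool value;
  bool is_default;  // stays true unless the user sets the flag explicitly
};

static void set_default(Flag& f, bool v) {
  f.value = v;  // changes the value; the flag still counts as "default"
}

int main() {
  Flag UseMemSetInBOT = { true, true };   // built-in default
  bool UseConcMarkSweepGC = false;
  bool UseG1GC = true;

  // Mirrors the guard in the hunk: respect an explicit user setting,
  // otherwise turn memset-based BOT updates off for CMS or G1.
  if (UseMemSetInBOT.is_default && (UseConcMarkSweepGC || UseG1GC)) {
    set_default(UseMemSetInBOT, false);
  }
  std::printf("UseMemSetInBOT = %s\n", UseMemSetInBOT.value ? "true" : "false");
  return 0;
}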
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,14 +159,30 @@ private:
            "right address out of range");
     assert(left < right, "Heap addresses out of order");
     size_t num_cards = pointer_delta(right, left) >> LogN_words;
-    memset(&_offset_array[index_for(left)], offset, num_cards);
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[index_for(left)], offset, num_cards);
+    } else {
+      size_t i = index_for(left);
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }

   void set_offset_array(size_t left, size_t right, u_char offset) {
     assert(right < _vs.committed_size(), "right address out of range");
     assert(left <= right, "indexes out of order");
     size_t num_cards = right - left + 1;
-    memset(&_offset_array[left], offset, num_cards);
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[left], offset, num_cards);
+    } else {
+      size_t i = left;
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }

   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
......
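The two hunks above replace an unconditional memset() with a flag-guarded fill. Below is a minimal standalone sketch of that guarded-fill pattern; it is not HotSpot code, and the plain byte array and boolean parameter are simplified stand-ins for _offset_array and the UseMemSetInBOT flag.

// Standalone sketch (not HotSpot code) of the guarded fill introduced above:
// when memset() cannot be trusted, fall back to an explicit store loop.
#include <cstddef>
#include <cstdio>
#include <cstring>

typedef unsigned char u_char;

static void set_offset_range(u_char* offset_array, size_t left,
                             size_t num_cards, u_char offset,
                             bool use_memset_in_bot) {
  if (use_memset_in_bot) {
    std::memset(&offset_array[left], offset, num_cards);
  } else {
    // One explicit store per entry; avoids the libc memset path entirely.
    for (size_t i = left, end = left + num_cards; i < end; i++) {
      offset_array[i] = offset;
    }
  }
}

int main() {
  u_char cards[16] = {0};
  set_offset_range(cards, 4, 8, 0x3f, /*use_memset_in_bot=*/false);
  for (size_t i = 0; i < 16; i++) {
    std::printf("%02x ", cards[i]);
  }
  std::printf("\n");
  return 0;
}

The loop trades a little speed for stores that concurrent readers observe directly, which is the point of the change on sun4v.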
@@ -1934,6 +1934,14 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   clear_cset_start_regions();

   guarantee(_task_queues != NULL, "task_queues allocation failure.");
+#ifdef SPARC
+  // Issue a stern warning, but allow use for experimentation and debugging.
+  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
+    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
+    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
+            " on sun4v; please understand that you are using at your own risk!");
+  }
+#endif
 }

 jint G1CollectedHeap::initialize() {
@@ -3582,15 +3590,11 @@ size_t G1CollectedHeap::pending_card_num() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   size_t buffer_size = dcqs.buffer_size();
   size_t buffer_num = dcqs.completed_buffers_num();
-  return buffer_size * buffer_num + extra_cards;
-}
-
-size_t G1CollectedHeap::max_pending_card_num() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  size_t buffer_size = dcqs.buffer_size();
-  size_t buffer_num = dcqs.completed_buffers_num();
-  int thread_num = Threads::number_of_threads();
-  return (buffer_num + thread_num) * buffer_size;
+
+  // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
+  // in bytes - not the number of 'entries'. We need to convert
+  // into a number of cards.
+  return (buffer_size * buffer_num + extra_cards) / oopSize;
 }

 size_t G1CollectedHeap::cards_scanned() {
......
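The reworked pending_card_num() above converts byte counts into a number of card entries, since the dirty card queue sizes are reported in bytes and each entry is one pointer-sized slot. Below is a standalone sketch of that arithmetic; it is not HotSpot code, the buffer sizes are made-up example values, and oop_size stands in for HotSpot's oopSize.

// Standalone sketch (not HotSpot code) of the bytes-to-cards conversion in
// the new G1CollectedHeap::pending_card_num(): all sizes are in bytes, so
// dividing by the pointer size yields the number of card entries.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t oop_size    = sizeof(void*);   // stand-in for HotSpot's oopSize
  const size_t buffer_size = 256 * oop_size;  // bytes per completed buffer (example)
  const size_t buffer_num  = 10;              // completed buffers (example)
  const size_t extra_cards = 17 * oop_size;   // bytes in partially filled buffers (example)

  const size_t pending_cards = (buffer_size * buffer_num + extra_cards) / oop_size;
  std::printf("pending cards: %zu\n", pending_cards);  // 2577 with these example values
  return 0;
}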
@@ -1706,7 +1706,6 @@ public:
   void stop_conc_gc_threads();

   size_t pending_card_num();
-  size_t max_pending_card_num();
   size_t cards_scanned();

 protected:
......
@@ -90,7 +90,6 @@ G1CollectorPolicy::G1CollectorPolicy() :

   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _prev_collection_pause_end_ms(0.0),
-  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -197,7 +196,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   int index = MIN2(_parallel_gc_threads - 1, 7);

-  _pending_card_diff_seq->add(0.0);
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
   _young_cards_per_entry_ratio_seq->add(
@@ -657,7 +655,7 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   for (HeapRegion * r = _recorded_survivor_head;
        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
        r = r->get_next_young_region()) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
   }
   return survivor_regions_evac_time;
 }
@@ -801,9 +799,8 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _cur_collection_pause_used_at_start_bytes = start_used;
   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   _pending_cards = _g1->pending_card_num();
-  _max_pending_cards = _g1->max_pending_card_num();

-  _bytes_in_collection_set_before_gc = 0;
+  _collection_set_bytes_used_before = 0;
   _bytes_copied_during_gc = 0;

   YoungList* young_list = _g1->young_list();
@@ -1036,12 +1033,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   // do that for any other surv rate groupsx

   if (update_stats) {
-    size_t diff = 0;
-    if (_max_pending_cards >= _pending_cards) {
-      diff = _max_pending_cards - _pending_cards;
-    }
-    _pending_card_diff_seq->add((double) diff);
-
     double cost_per_card_ms = 0.0;
     if (_pending_cards > 0) {
       cost_per_card_ms = phase_times()->_update_rs_time / (double) _pending_cards;
@@ -1126,9 +1117,9 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
     _constant_other_time_ms_seq->add(constant_other_time_ms);

     double survival_ratio = 0.0;
-    if (_bytes_in_collection_set_before_gc > 0) {
+    if (_collection_set_bytes_used_before > 0) {
       survival_ratio = (double) _bytes_copied_during_gc /
-                                (double) _bytes_in_collection_set_before_gc;
+                                (double) _collection_set_bytes_used_before;
     }

     _pending_cards_seq->add((double) _pending_cards);
@@ -1228,6 +1219,15 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   dcqs.notify_if_necessary();
 }

+double
+G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
+                                                size_t scanned_cards) {
+  return
+    predict_rs_update_time_ms(pending_cards) +
+    predict_rs_scan_time_ms(scanned_cards) +
+    predict_constant_other_time_ms();
+}
+
 double
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   size_t rs_length = predict_rs_length_diff();
@@ -1240,21 +1240,28 @@ G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }

-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
-                                                size_t scanned_cards) {
-  return
-    predict_rs_update_time_ms(pending_cards) +
-    predict_rs_scan_time_ms(scanned_cards) +
-    predict_constant_other_time_ms();
+size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
+  size_t bytes_to_copy;
+  if (hr->is_marked())
+    bytes_to_copy = hr->max_live_bytes();
+  else {
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
+    int age = hr->age_in_surv_rate_group();
+    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
+    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
+  }
+  return bytes_to_copy;
 }

 double
 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
-                                                   bool young) {
+                                                   bool for_young_gc) {
   size_t rs_length = hr->rem_set()->occupied();
   size_t card_num;
-  if (gcs_are_young()) {
+
+  // Predicting the number of cards is based on which type of GC
+  // we're predicting for.
+  if (for_young_gc) {
     card_num = predict_young_card_num(rs_length);
   } else {
     card_num = predict_non_young_card_num(rs_length);
@@ -1265,25 +1272,14 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
     predict_rs_scan_time_ms(card_num) +
     predict_object_copy_time_ms(bytes_to_copy);

-  if (young)
+  // The prediction of the "other" time for this region is based
+  // upon the region type and NOT the GC type.
+  if (hr->is_young()) {
     region_elapsed_time_ms += predict_young_other_time_ms(1);
-  else
+  } else {
     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
-
-  return region_elapsed_time_ms;
-}
-
-size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
-  size_t bytes_to_copy;
-  if (hr->is_marked())
-    bytes_to_copy = hr->max_live_bytes();
-  else {
-    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
-    int age = hr->age_in_surv_rate_group();
-    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
-    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
   }
-  return bytes_to_copy;
+  return region_elapsed_time_ms;
 }

 void
@@ -1342,22 +1338,6 @@ size_t G1CollectorPolicy::expansion_amount() {
   }
 }

-class CountCSClosure: public HeapRegionClosure {
-  G1CollectorPolicy* _g1_policy;
-public:
-  CountCSClosure(G1CollectorPolicy* g1_policy) :
-    _g1_policy(g1_policy) {}
-  bool doHeapRegion(HeapRegion* r) {
-    _g1_policy->_bytes_in_collection_set_before_gc += r->used();
-    return false;
-  }
-};
-
-void G1CollectorPolicy::count_CS_bytes_used() {
-  CountCSClosure cs_closure(this);
-  _g1->collection_set_iterate(&cs_closure);
-}
-
 void G1CollectorPolicy::print_tracing_info() const {
   _trace_gen0_time_data.print();
   _trace_gen1_time_data.print();
@@ -1696,7 +1676,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
   // retiring the current allocation region) or a concurrent
   // refine thread (RSet sampling).

-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
   size_t used_bytes = hr->used();
   _inc_cset_recorded_rs_lengths += rs_length;
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@@ -1731,7 +1711,7 @@ void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
@@ -1854,8 +1834,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
 }

 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
-  // Set this here - in case we're not doing young collections.
-  double non_young_start_time_sec = os::elapsedTime();
+  double young_start_time_sec = os::elapsedTime();
   YoungList* young_list = _g1->young_list();
   finalize_incremental_cset_building();
@@ -1869,17 +1848,14 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double predicted_pause_time_ms = base_time_ms;
   double time_remaining_ms = target_pause_time_ms - base_time_ms;

-  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
+  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                 "start choosing CSet",
+                ergo_format_size("_pending_cards")
                 ergo_format_ms("predicted base time")
                 ergo_format_ms("remaining time")
                 ergo_format_ms("target pause time"),
-                base_time_ms, time_remaining_ms, target_pause_time_ms);
-
-  HeapRegion* hr;
-  double young_start_time_sec = os::elapsedTime();
+                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

-  _collection_set_bytes_used_before = 0;
   _last_gc_was_young = gcs_are_young() ? true : false;

   if (_last_gc_was_young) {
@@ -1895,7 +1871,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   uint survivor_region_length = young_list->survivor_length();
   uint eden_region_length = young_list->length() - survivor_region_length;
   init_cset_region_lengths(eden_region_length, survivor_region_length);
-  hr = young_list->first_survivor_region();
+
+  HeapRegion* hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
     hr->set_young();
@@ -1926,8 +1903,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   phase_times()->_recorded_young_cset_choice_time_ms =
     (young_end_time_sec - young_start_time_sec) * 1000.0;

-  // We are doing young collections so reset this.
-  non_young_start_time_sec = young_end_time_sec;
+  // Set the start of the non-young choice time.
+  double non_young_start_time_sec = young_end_time_sec;

   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
@@ -1937,6 +1914,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
+
     HeapRegion* hr = cset_chooser->peek();
     while (hr != NULL) {
       if (old_cset_region_length() >= max_old_cset_length) {
@@ -1950,7 +1928,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
         break;
       }

-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
       if (check_time_remaining) {
         if (predicted_time_ms > time_remaining_ms) {
           // Too expensive for the current CSet.
@@ -2025,8 +2003,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   stop_incremental_cset_building();

-  count_CS_bytes_used();
-
   ergo_verbose5(ErgoCSetConstruction,
                 "finish choosing CSet",
                 ergo_format_region("eden")
......
@@ -228,7 +228,6 @@ private:
   TruncatedSeq* _alloc_rate_ms_seq;
   double _prev_collection_pause_end_ms;

-  TruncatedSeq* _pending_card_diff_seq;
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
   TruncatedSeq* _young_cards_per_entry_ratio_seq;
@@ -295,7 +294,6 @@ private:
   double _pause_time_target_ms;

   size_t _pending_cards;
-  size_t _max_pending_cards;

 public:
   // Accessors
@@ -325,28 +323,6 @@ public:
     _max_rs_lengths = rs_lengths;
   }

-  size_t predict_pending_card_diff() {
-    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
-    if (prediction < 0.00001) {
-      return 0;
-    } else {
-      return (size_t) prediction;
-    }
-  }
-
-  size_t predict_pending_cards() {
-    size_t max_pending_card_num = _g1->max_pending_card_num();
-    size_t diff = predict_pending_card_diff();
-    size_t prediction;
-    if (diff > max_pending_card_num) {
-      prediction = max_pending_card_num;
-    } else {
-      prediction = max_pending_card_num - diff;
-    }
-    return prediction;
-  }
-
   size_t predict_rs_length_diff() {
     return (size_t) get_new_prediction(_rs_length_diff_seq);
   }
@@ -439,7 +415,7 @@ public:
   double predict_base_elapsed_time_ms(size_t pending_cards,
                                       size_t scanned_cards);
   size_t predict_bytes_to_copy(HeapRegion* hr);
-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
+  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

   void set_recorded_rs_lengths(size_t rs_lengths);
@@ -495,12 +471,6 @@ public:
   }

 private:
-  size_t _bytes_in_collection_set_before_gc;
-  size_t _bytes_copied_during_gc;
-
-  // Used to count used bytes in CS.
-  friend class CountCSClosure;
-
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@@ -514,9 +484,13 @@ private:
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
-  // pause.
+  // pause, and incremented in finalize_cset() when adding old regions
+  // (if any) to the collection set.
   size_t _collection_set_bytes_used_before;

+  // The number of bytes copied during the GC.
+  size_t _bytes_copied_during_gc;
+
   // The associated information that is maintained while the incremental
   // collection set is being built with young regions. Used to populate
   // the recorded info for the evacuation pause.
@@ -646,9 +620,6 @@ private:
   bool predict_will_fit(uint young_length, double base_time_ms,
                         uint base_free_regions, double target_pause_time_ms);

-  // Count the number of bytes used in the CS.
-  void count_CS_bytes_used();
-
 public:

   G1CollectorPolicy();
@@ -666,10 +637,6 @@ public:
   // higher, recalculate the young list target length prediction.
   void revise_young_list_target_length_if_necessary();

-  size_t bytes_in_collection_set() {
-    return _bytes_in_collection_set_before_gc;
-  }
-
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
......
@@ -125,6 +125,7 @@ public:
 #define ergo_format_double(_name_) ", " _name_ ": %1.2f"
 #define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
 #define ergo_format_ms(_name_) ", " _name_ ": %1.2f ms"
+#define ergo_format_size(_name_) ", " _name_ ": "SIZE_FORMAT

 // Double parameter format strings
 #define ergo_format_byte_perc(_name_) \
......
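The new ergo_format_size() macro relies on adjacent string-literal concatenation, so the ergo_format_* pieces join into a single printf-style format string, as in the ergo_verbose4 call added to finalize_cset() above. Below is a minimal standalone sketch of that mechanism; it is not HotSpot code, and SIZE_FORMAT is assumed to expand to "%zu" here purely for illustration (HotSpot defines it per platform).

// Standalone sketch (not HotSpot code) of how the ergo_format_* macros
// compose into one printf-style format string via literal concatenation.
#include <cstddef>
#include <cstdio>

#define SIZE_FORMAT "%zu"  // assumed expansion for this sketch
#define ergo_format_size(_name_) ", " _name_ ": " SIZE_FORMAT
#define ergo_format_ms(_name_)   ", " _name_ ": %1.2f ms"

int main() {
  size_t pending_cards = 2048;   // example value
  double base_time_ms  = 12.34;  // example value
  // The adjacent literals concatenate at compile time into:
  // "start choosing CSet, _pending_cards: %zu, predicted base time: %1.2f ms\n"
  std::printf("start choosing CSet"
              ergo_format_size("_pending_cards")
              ergo_format_ms("predicted base time")
              "\n",
              pending_cards, base_time_ms);
  return 0;
}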
@@ -287,17 +287,17 @@
           "The number of times we'll force an overflow during "              \
           "concurrent marking")                                              \
                                                                              \
-  develop(uintx, G1DefaultMinNewGenPercent, 20,                              \
+  experimental(uintx, G1DefaultMinNewGenPercent, 20,                         \
           "Percentage (0-100) of the heap size to use as minimum "           \
           "young gen size.")                                                 \
                                                                              \
-  develop(uintx, G1DefaultMaxNewGenPercent, 80,                              \
+  experimental(uintx, G1DefaultMaxNewGenPercent, 80,                         \
           "Percentage (0-100) of the heap size to use as maximum "           \
           "young gen size.")                                                 \
                                                                              \
-  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90,                    \
+  experimental(uintx, G1OldCSetRegionLiveThresholdPercent, 90,               \
           "Threshold for regions to be added to the collection set. "        \
-          "Regions with more live bytes that this will not be collected.")   \
+          "Regions with more live bytes than this will not be collected.")   \
                                                                              \
   product(uintx, G1HeapWastePercent, 5,                                      \
           "Amount of space, expressed as a percentage of the heap size, "    \
@@ -306,7 +306,7 @@
   product(uintx, G1MixedGCCountTarget, 4,                                    \
           "The target number of mixed GCs after a marking cycle.")           \
                                                                              \
-  develop(uintx, G1OldCSetRegionThresholdPercent, 10,                        \
+  experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                   \
           "An upper bound for the number of old CSet regions expressed "     \
           "as a percentage of the heap size.")                               \
                                                                              \
......
@@ -384,10 +384,17 @@ void HeapRegion::par_clear() {
 }

 void HeapRegion::calc_gc_efficiency() {
+  // GC efficiency is the ratio of how much space would be
+  // reclaimed over how long we predict it would take to reclaim it.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
-  _gc_efficiency = (double) reclaimable_bytes() /
-                            g1p->predict_region_elapsed_time_ms(this, false);
+
+  // Retrieve a prediction of the elapsed time for this region for
+  // a mixed gc because the region will only be evacuated during a
+  // mixed gc.
+  double region_elapsed_time_ms =
+    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
+  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }

 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
......