Commit 38f2f704 authored by tonyp

7132029: G1: mixed GC phase lasts for longer than it should

Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC to avoid pathological cases. It also ensures that when we do a mixed GC we'll always find old regions to add to the CSet (i.e., it eliminates the case where a mixed GC collects no old regions, which can happen today).
Reviewed-by: johnc, brutisso
Parent ab4137af
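In outline, the two new bounds act as a clamp around the pause-time-driven selection: the policy picks old regions until the time budget runs out, then the count is raised to the minimum (so every mixed GC makes progress through the candidate list) and capped at the maximum (so a single pause is never overloaded). A minimal sketch of that clamp, for orientation only; the helper below is illustrative and not part of the patch (the real logic sits in G1CollectorPolicy::finalize_cset(), outside this excerpt):

    // Illustrative: how the min/max bounds keep mixed GCs on schedule.
    // min_old_cset ensures forward progress through the candidates even
    // when regions look expensive; max_old_cset caps the per-pause work.
    size_t clamp_old_cset_length(size_t chosen_by_time_budget,
                                 size_t min_old_cset, size_t max_old_cset) {
      size_t n = chosen_by_time_budget;
      if (n < min_old_cset) { n = min_old_cset; }
      if (n > max_old_cset) { n = max_old_cset; }
      return n;
    }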
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,8 @@ void CSetChooserCache::clear() {
#ifndef PRODUCT
bool CSetChooserCache::verify() {
guarantee(false, "CSetChooserCache::verify(): don't call this any more");
int index = _first;
HeapRegion *prev = NULL;
for (int i = 0; i < _occupancy; ++i) {
@@ -75,6 +77,8 @@ bool CSetChooserCache::verify() {
#endif // PRODUCT
void CSetChooserCache::insert(HeapRegion *hr) {
guarantee(false, "CSetChooserCache::insert(): don't call this any more");
  assert(!is_full(), "cache should not be full");
hr->calc_gc_efficiency();
@@ -104,6 +108,9 @@ void CSetChooserCache::insert(HeapRegion *hr) {
}
HeapRegion *CSetChooserCache::remove_first() {
guarantee(false, "CSetChooserCache::remove_first(): "
"don't call this any more");
if (_occupancy > 0) {
assert(_cache[_first] != NULL, "cache should have at least one region");
HeapRegion *ret = _cache[_first];
@@ -118,16 +125,35 @@ HeapRegion *CSetChooserCache::remove_first() {
}
}
- static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+ // Even though we don't use the GC efficiency in our heuristics as
+ // much as we used to, we still order according to GC efficiency. This
+ // will cause regions with a lot of live objects and large RSets to
+ // end up at the end of the array. Given that we might skip collecting
+ // the last few old regions, if after a few mixed GCs the remaining
+ // have reclaimable bytes under a certain threshold, the hope is that
+ // the ones we'll skip are ones with both large RSets and a lot of
+ // live objects, not the ones with just a lot of live objects if we
+ // ordered according to the amount of reclaimable bytes per region.
+ static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
    if (hr1 == NULL) {
-     if (hr2 == NULL) return 0;
-     else return 1;
+     if (hr2 == NULL) {
+       return 0;
+     } else {
+       return 1;
+     }
    } else if (hr2 == NULL) {
      return -1;
    }
-   if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-   else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-   else return 0;
+   double gc_eff1 = hr1->gc_efficiency();
+   double gc_eff2 = hr2->gc_efficiency();
+   if (gc_eff1 > gc_eff2) {
+     return -1;
+   } else if (gc_eff1 < gc_eff2) {
+     return 1;
+   } else {
+     return 0;
+   }
}
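Note the comparator's convention: returning -1 when hr1 has the higher GC efficiency sorts the array in descending efficiency order, so the regions with the best reclaimed-bytes-per-ms ratio end up at the front, where the CSet chooser consumes them first, while NULL entries and low-efficiency regions sink to the back.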
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
@@ -151,51 +177,61 @@ CollectionSetChooser::CollectionSetChooser() :
//
_markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
ResourceObj::C_HEAP),
-                     100),
-                    true),
-   _curMarkedIndex(0),
-   _numMarkedRegions(0),
-   _unmarked_age_1_returned_as_new(false),
-   _first_par_unreserved_idx(0)
- {}
+                     100), true /* C_Heap */),
+   _curr_index(0), _length(0),
+   _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+   _first_par_unreserved_idx(0) {
+   _regionLiveThresholdBytes =
+     HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+ }
#ifndef PRODUCT
bool CollectionSetChooser::verify() {
+   guarantee(_length >= 0, err_msg("_length: %d", _length));
+   guarantee(0 <= _curr_index && _curr_index <= _length,
+             err_msg("_curr_index: %d _length: %d", _curr_index, _length));
int index = 0;
-   guarantee(_curMarkedIndex <= _numMarkedRegions,
-             "_curMarkedIndex should be within bounds");
-   while (index < _curMarkedIndex) {
-     guarantee(_markedRegions.at(index++) == NULL,
-               "all entries before _curMarkedIndex should be NULL");
+   size_t sum_of_reclaimable_bytes = 0;
+   while (index < _curr_index) {
+     guarantee(_markedRegions.at(index) == NULL,
+               "all entries before _curr_index should be NULL");
+     index += 1;
}
HeapRegion *prev = NULL;
-   while (index < _numMarkedRegions) {
+   while (index < _length) {
HeapRegion *curr = _markedRegions.at(index++);
guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
int si = curr->sort_index();
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->isHumongous(), "should not be humongous!");
guarantee(si > -1 && si == (index-1), "sort index invariant");
if (prev != NULL) {
guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
guarantee(orderRegions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
prev->gc_efficiency(), curr->gc_efficiency()));
}
+     sum_of_reclaimable_bytes += curr->reclaimable_bytes();
prev = curr;
}
-   return _cache.verify();
+   guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+             err_msg("reclaimable bytes inconsistent, "
+                     "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                     _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+   return true;
}
#endif
- void
- CollectionSetChooser::fillCache() {
-   while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-     HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+ void CollectionSetChooser::fillCache() {
+   guarantee(false, "fillCache: don't call this any more");
+   while (!_cache.is_full() && (_curr_index < _length)) {
+     HeapRegion* hr = _markedRegions.at(_curr_index);
assert(hr != NULL,
err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                _curMarkedIndex));
-     _curMarkedIndex += 1;
+                _curr_index));
+     _curr_index += 1;
assert(!hr->is_young(), "should not be young!");
-     assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+     assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
_markedRegions.at_put(hr->sort_index(), NULL);
_cache.insert(hr);
assert(!_cache.is_empty(), "cache should not be empty");
@@ -203,9 +239,7 @@ CollectionSetChooser::fillCache() {
assert(verify(), "cache should be consistent");
}
- void
- CollectionSetChooser::sortMarkedHeapRegions() {
-   guarantee(_cache.is_empty(), "cache should be empty");
+ void CollectionSetChooser::sortMarkedHeapRegions() {
// First trim any unused portion of the top in the parallel case.
if (_first_par_unreserved_idx > 0) {
if (G1PrintParCleanupStats) {
@@ -217,43 +251,78 @@ CollectionSetChooser::sortMarkedHeapRegions() {
_markedRegions.trunc_to(_first_par_unreserved_idx);
}
_markedRegions.sort(orderRegions);
-   assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-   assert(_numMarkedRegions == 0
-          || _markedRegions.at(_numMarkedRegions-1) != NULL,
-          "Testing _numMarkedRegions");
-   assert(_numMarkedRegions == _markedRegions.length()
-          || _markedRegions.at(_numMarkedRegions) == NULL,
-          "Testing _numMarkedRegions");
+   assert(_length <= _markedRegions.length(), "Requirement");
+   assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+          "Testing _length");
+   assert(_length == _markedRegions.length() ||
+          _markedRegions.at(_length) == NULL, "Testing _length");
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
}
-   for (int i = 0; i < _numMarkedRegions; i++) {
+   for (int i = 0; i < _length; i++) {
assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
_markedRegions.at(i)->set_sort_index(i);
}
if (G1PrintRegionLivenessInfo) {
G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-     for (int i = 0; i < _numMarkedRegions; ++i) {
+     for (int i = 0; i < _length; ++i) {
HeapRegion* r = _markedRegions.at(i);
cl.doHeapRegion(r);
}
}
assert(verify(), "should now be sorted");
assert(verify(), "CSet chooser verification");
}
+ size_t CollectionSetChooser::calcMinOldCSetLength() {
+   // The min old CSet region bound is based on the maximum desired
+   // number of mixed GCs after a cycle. I.e., even if some old regions
+   // look expensive, we should add them to the CSet anyway to make
+   // sure we go through the available old regions in no more than the
+   // maximum desired number of mixed GCs.
+   //
+   // The calculation is based on the number of marked regions we added
+   // to the CSet chooser in the first place, not how many remain, so
+   // that the result is the same during all mixed GCs that follow a cycle.
+   const size_t region_num = (size_t) _length;
+   const size_t gc_num = (size_t) G1MaxMixedGCNum;
+   size_t result = region_num / gc_num;
+   // emulate ceiling
+   if (result * gc_num < region_num) {
+     result += 1;
+   }
+   return result;
+ }
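Both calcMinOldCSetLength() and calcMaxOldCSetLength() round up with a multiply-and-compare rather than the more familiar add-before-divide idiom. A sketch of the equivalence (hypothetical helper names, assuming b > 0):

    // ceil(a / b) without floating point, as the patch does it:
    size_t ceil_div_check(size_t a, size_t b) {
      size_t q = a / b;
      if (q * b < a) {   // the division truncated, so round up
        q += 1;
      }
      return q;
    }
    // The common alternative; same result, but a + b - 1 can overflow
    // when a is close to the type's maximum:
    size_t ceil_div_add(size_t a, size_t b) {
      return (a + b - 1) / b;
    }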
- void
- CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+ size_t CollectionSetChooser::calcMaxOldCSetLength() {
+   // The max old CSet region bound is based on the threshold expressed
+   // as a percentage of the heap size. I.e., it should bound the
+   // number of old regions added to the CSet irrespective of how many
+   // of them are available.
+   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+   const size_t region_num = g1h->n_regions();
+   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+   size_t result = region_num * perc / 100;
+   // emulate ceiling
+   if (100 * result < region_num * perc) {
+     result += 1;
+   }
+   return result;
+ }
+
+ void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
assert(!hr->isHumongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_young(), "should not be young!");
_markedRegions.append(hr);
-   _numMarkedRegions++;
+   _length++;
+   _remainingReclaimableBytes += hr->reclaimable_bytes();
hr->calc_gc_efficiency();
}
- void
- CollectionSetChooser::
- prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                              size_t chunkSize) {
_first_par_unreserved_idx = 0;
int n_threads = ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) {
@@ -274,8 +343,7 @@ prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
_markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
}
- jint
- CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+ jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
// Don't do this assert because this can be called at a point
  // where the loop upstream will not execute again but might
// try to claim more chunks (loop test has not been done yet).
@@ -287,83 +355,37 @@ CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
return res - n_regions;
}
- void
- CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+ void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
assert(_markedRegions.at(index) == NULL, "precondition");
assert(!hr->is_young(), "should not be young!");
_markedRegions.at_put(index, hr);
hr->calc_gc_efficiency();
}
- void
- CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-   (void)Atomic::add(inc_by, &_numMarkedRegions);
+ void CollectionSetChooser::updateTotals(jint region_num,
+                                         size_t reclaimable_bytes) {
+   // Only take the lock if we actually need to update the totals.
+   if (region_num > 0) {
+     assert(reclaimable_bytes > 0, "invariant");
+     // We could have just used atomics instead of taking the
+     // lock. However, we currently don't have an atomic add for size_t.
+     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+     _length += (int) region_num;
+     _remainingReclaimableBytes += reclaimable_bytes;
+   } else {
+     assert(reclaimable_bytes == 0, "invariant");
+   }
}
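The lock in updateTotals() stays cheap because it is taken once per worker rather than once per region: each GC worker accumulates its own region count and reclaimable-byte total while filling its claimed chunk, then publishes both with a single call. A sketch of that calling pattern (the loop and array names are illustrative, not part of the patch):

    // Illustrative per-worker batching around updateTotals().
    void add_worker_candidates(CollectionSetChooser* chooser,
                               HeapRegion** regions, int region_count) {
      jint added = 0;
      size_t reclaimable = 0;
      for (int i = 0; i < region_count; i++) {
        HeapRegion* hr = regions[i];
        // The region itself is stored via setMarkedHeapRegion() into a
        // chunk previously claimed with getParMarkedHeapRegionChunk().
        added += 1;
        reclaimable += hr->reclaimable_bytes();
      }
      chooser->updateTotals(added, reclaimable);  // one lock acquisition
    }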
- void
- CollectionSetChooser::clearMarkedHeapRegions(){
+ void CollectionSetChooser::clearMarkedHeapRegions() {
    for (int i = 0; i < _markedRegions.length(); i++) {
-     HeapRegion* r = _markedRegions.at(i);
-     if (r != NULL) r->set_sort_index(-1);
+     HeapRegion* r = _markedRegions.at(i);
+     if (r != NULL) {
+       r->set_sort_index(-1);
+     }
    }
    _markedRegions.clear();
-   _curMarkedIndex = 0;
-   _numMarkedRegions = 0;
-   _cache.clear();
+   _curr_index = 0;
+   _length = 0;
+   _remainingReclaimableBytes = 0;
};
void
CollectionSetChooser::updateAfterFullCollection() {
clearMarkedHeapRegions();
}
- // if time_remaining < 0.0, then this method should try to return
- // a region, whether it fits within the remaining time or not
- HeapRegion*
- CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                           double avg_prediction) {
-   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-   G1CollectorPolicy* g1p = g1h->g1_policy();
-   fillCache();
-   if (_cache.is_empty()) {
-     assert(_curMarkedIndex == _numMarkedRegions,
-            "if cache is empty, list should also be empty");
-     ergo_verbose0(ErgoCSetConstruction,
-                   "stop adding old regions to CSet",
-                   ergo_format_reason("cache is empty"));
-     return NULL;
-   }
-   HeapRegion *hr = _cache.get_first();
-   assert(hr != NULL, "if cache not empty, first entry should be non-null");
-   double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-   if (g1p->adaptive_young_list_length()) {
-     if (time_remaining - predicted_time < 0.0) {
-       g1h->check_if_region_is_too_expensive(predicted_time);
-       ergo_verbose2(ErgoCSetConstruction,
-                     "stop adding old regions to CSet",
-                     ergo_format_reason("predicted old region time higher than remaining time")
-                     ergo_format_ms("predicted old region time")
-                     ergo_format_ms("remaining time"),
-                     predicted_time, time_remaining);
-       return NULL;
-     }
-   } else {
-     double threshold = 2.0 * avg_prediction;
-     if (predicted_time > threshold) {
-       ergo_verbose2(ErgoCSetConstruction,
-                     "stop adding old regions to CSet",
-                     ergo_format_reason("predicted old region time higher than threshold")
-                     ergo_format_ms("predicted old region time")
-                     ergo_format_ms("threshold"),
-                     predicted_time, threshold);
-       return NULL;
-     }
-   }
-   HeapRegion *hr2 = _cache.remove_first();
-   assert(hr == hr2, "cache contents should not have changed");
-   return hr;
- }
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,28 +28,6 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"
- // We need to sort heap regions by collection desirability.
- // This sorting is currently done in two "stages". An initial sort is
- // done following a cleanup pause as soon as all of the marked but
- // non-empty regions have been identified and the completely empty
- // ones reclaimed.
- // This gives us a global sort on a GC efficiency metric
- // based on predictive data available at that time. However,
- // any of these regions that are collected will only be collected
- // during a future GC pause, by which time it is possible that newer
- // data might allow us to revise and/or refine the earlier
- // pause predictions, leading to changes in expected gc efficiency
- // order. To somewhat mitigate this obsolescence, more so in the
- // case of regions towards the end of the list, which will be
- // picked later, these pre-sorted regions from the _markedRegions
- // array are not used as is, but a small prefix thereof is
- // insertion-sorted again into a small cache, based on more
- // recent remembered set information. Regions are then drawn
- // from this cache to construct the collection set at each
- // incremental GC.
- // This scheme and/or its implementation may be subject to
- // revision in the future.
class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
private:
enum {
@@ -103,24 +81,82 @@ public:
class CollectionSetChooser: public CHeapObj {
GrowableArray<HeapRegion*> _markedRegions;
-   int _curMarkedIndex;
-   int _numMarkedRegions;
-   CSetChooserCache _cache;
-   // True iff last collection pause ran out of new "age 0" regions, and
-   // returned an "age 1" region.
-   bool _unmarked_age_1_returned_as_new;
+   // The index of the next candidate old region to be considered for
+   // addition to the CSet.
+   int _curr_index;
+   // The number of candidate old regions added to the CSet chooser.
+   int _length;
+   CSetChooserCache _cache;
jint _first_par_unreserved_idx;
+   // If a region has more live bytes than this threshold, it will not
+   // be added to the CSet chooser and will not be a candidate for
+   // collection.
+   size_t _regionLiveThresholdBytes;
+   // The sum of reclaimable bytes over all the regions in the CSet chooser.
+   size_t _remainingReclaimableBytes;
public:
-   HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction);
+   // Return the current candidate region to be considered for
+   // collection without removing it from the CSet chooser.
+   HeapRegion* peek() {
+     HeapRegion* res = NULL;
+     if (_curr_index < _length) {
+       res = _markedRegions.at(_curr_index);
+       assert(res != NULL,
+              err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                      _curr_index));
+     }
+     return res;
+   }
+
+   // Remove the given region from the CSet chooser and move to the
+   // next one. The given region should be the current candidate region
+   // in the CSet chooser.
+   void remove_and_move_to_next(HeapRegion* hr) {
+     assert(hr != NULL, "pre-condition");
+     assert(_curr_index < _length, "pre-condition");
+     assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
+     hr->set_sort_index(-1);
+     _markedRegions.at_put(_curr_index, NULL);
+     assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+            err_msg("remaining reclaimable bytes inconsistent "
+                    "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
+                    hr->reclaimable_bytes(), _remainingReclaimableBytes));
+     _remainingReclaimableBytes -= hr->reclaimable_bytes();
+     _curr_index += 1;
+   }
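Together, peek() and remove_and_move_to_next() replace the removed cache-based getNextMarkedRegion(): the caller inspects the current candidate, decides whether it fits, and only then consumes it. A sketch of the intended drain loop under a pause-time budget (the prediction and CSet helpers are assumptions standing in for policy code outside this excerpt):

    // Assumed hooks, not part of this patch:
    double predict_old_region_time_ms(HeapRegion* hr);
    void add_old_region_to_cset(HeapRegion* hr);

    // Illustrative: consume candidates between the min and max bounds.
    void drain_old_candidates(CollectionSetChooser* chooser,
                              size_t min_len, size_t max_len,
                              double time_remaining_ms) {
      size_t taken = 0;
      while (taken < max_len) {
        HeapRegion* hr = chooser->peek();
        if (hr == NULL) {
          break;  // no candidates remain
        }
        double cost_ms = predict_old_region_time_ms(hr);
        if (taken >= min_len && cost_ms > time_remaining_ms) {
          break;  // past the minimum, stop once a region no longer fits
        }
        chooser->remove_and_move_to_next(hr);
        add_old_region_to_cset(hr);
        time_remaining_ms -= cost_ms;
        taken += 1;
      }
    }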
CollectionSetChooser();
void sortMarkedHeapRegions();
void fillCache();
+   // Determine whether to add the given region to the CSet chooser or
+   // not. Currently, we skip humongous regions (we never add them to
+   // the CSet, we only reclaim them during cleanup) and regions whose
+   // live bytes are over the threshold.
+   bool shouldAdd(HeapRegion* hr) {
+     assert(hr->is_marked(), "pre-condition");
+     assert(!hr->is_young(), "should never consider young regions");
+     return !hr->isHumongous() &&
+             hr->live_bytes() < _regionLiveThresholdBytes;
+   }
+
+   // Calculate the minimum number of old regions we'll add to the CSet
+   // during a mixed GC.
+   size_t calcMinOldCSetLength();
+   // Calculate the maximum number of old regions we'll add to the CSet
+   // during a mixed GC.
+   size_t calcMaxOldCSetLength();
// Serial version.
void addMarkedHeapRegion(HeapRegion *hr);
// Must be called before calls to getParMarkedHeapRegionChunk.
@@ -133,14 +169,21 @@ public:
// Set the marked array entry at index to hr. Careful to claim the index
// first if in parallel.
void setMarkedHeapRegion(jint index, HeapRegion* hr);
-   // Atomically increment the number of claimed regions by "inc_by".
-   void incNumMarkedHeapRegions(jint inc_by);
+   // Atomically increment the number of added regions by region_num
+   // and the amount of reclaimable bytes by reclaimable_bytes.
+   void updateTotals(jint region_num, size_t reclaimable_bytes);
void clearMarkedHeapRegions();
void updateAfterFullCollection();
+   // Return the number of candidate regions that remain to be collected.
+   size_t remainingRegions() { return _length - _curr_index; }
+   // Determine whether the CSet chooser has more candidate regions or not.
+   bool isEmpty() { return remainingRegions() == 0; }
-   bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
+   // Return the reclaimable bytes that remain to be collected on
+   // all the candidate regions in the CSet chooser.
+   size_t remainingReclaimableBytes() { return _remainingReclaimableBytes; }
// Returns true if the used portion of "_markedRegions" is properly
// sorted, otherwise asserts false.
@@ -148,9 +191,17 @@ public:
bool verify(void);
bool regionProperlyOrdered(HeapRegion* r) {
int si = r->sort_index();
-     return (si == -1) ||
-            (si > -1 && _markedRegions.at(si) == r) ||
-            (si < -1 && _cache.region_in_cache(r));
+     if (si > -1) {
+       guarantee(_curr_index <= si && si < _length,
+                 err_msg("curr: %d sort index: %d: length: %d",
+                         _curr_index, si, _length));
+       guarantee(_markedRegions.at(si) == r,
+                 err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
+                         si, _markedRegions.at(si), r));
+     } else {
+       guarantee(si == -1, err_msg("sort index: %d", si));
+     }
+     return true;
}
#endif
......
@@ -3447,16 +3447,6 @@ G1CollectedHeap::doConcurrentMark() {
}
}
- double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
-                                                        bool young) {
-   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
- }
-
- void G1CollectedHeap::check_if_region_is_too_expensive(double
-                                                        predicted_time_ms) {
-   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
- }
size_t G1CollectedHeap::pending_card_num() {
size_t extra_cards = 0;
JavaThread *curr = Threads::first();
@@ -3728,12 +3718,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
-       g1_policy()->choose_collection_set(target_pause_time_ms);
+       g1_policy()->finalize_cset(target_pause_time_ms);
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
-       // GC). We also call this after choose_collection_set() to
+       // GC). We also call this after finalize_cset() to
// ensure that the CSet has been finalized.
_cm->verify_no_cset_oops(true /* verify_stacks */,
true /* verify_enqueued_buffers */,
......
@@ -1182,6 +1182,12 @@ public:
bool free_regions_coming() { return _free_regions_coming; }
void wait_while_free_regions_coming();
+   // Determine whether the given region is one that we are using as an
+   // old GC alloc region.
+   bool is_old_gc_alloc_region(HeapRegion* hr) {
+     return hr == _retained_old_gc_alloc_region;
+   }
// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
// "CollectedHeap" supports.
@@ -1662,8 +1668,6 @@ public:
public:
void stop_conc_gc_threads();
-   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
-   void check_if_region_is_too_expensive(double predicted_time_ms);
size_t pending_card_num();
size_t max_pending_card_num();
size_t cards_scanned();
......
@@ -312,16 +312,13 @@ private:
double _recorded_non_young_free_cset_time_ms;
double _sigma;
-   double _expensive_region_limit_ms;
size_t _rs_lengths_prediction;
size_t _known_garbage_bytes;
double _known_garbage_ratio;
-   double sigma() {
-     return _sigma;
-   }
+   double sigma() { return _sigma; }
// A function that prevents us putting too much stock in small sample
// sets. Returns a number between 2.0 and 1.0, depending on the number
@@ -491,8 +488,6 @@ public:
get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}
-   void check_if_region_is_too_expensive(double predicted_time_ms);
double predict_young_collection_elapsed_time_ms(size_t adjustment);
double predict_base_elapsed_time_ms(size_t pending_cards);
double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -707,7 +702,6 @@ private:
// initial-mark work.
volatile bool _during_initial_mark_pause;
-   bool _should_revert_to_young_gcs;
bool _last_young_gc;
// This set of variables tracks the collector efficiency, in order to
@@ -946,10 +940,17 @@ public:
return _bytes_copied_during_gc;
}
+   // Determine whether the next GC should be mixed. Called to determine
+   // whether to start mixed GCs or whether to carry on doing mixed
+   // GCs. The two action strings are used in the ergo output when the
+   // method returns true or false.
+   bool next_gc_should_be_mixed(const char* true_action_str,
+                                const char* false_action_str);
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
-   void choose_collection_set(double target_pause_time_ms);
+   void finalize_cset(double target_pause_time_ms);
// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
......
@@ -131,8 +131,8 @@ public:
", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"
// Generates the format string
- #define ergo_format(_action_, _extra_format_) \
-   " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"
+ #define ergo_format(_extra_format_) \
+   " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]"
// Conditionally, prints an ergonomic decision record. _extra_format_
// is the format string for the optional items we'd like to print
@@ -145,20 +145,21 @@ public:
// them to the print method. For convenience, we have wrapper macros
// below which take a specific number of arguments and set the rest to
// a default value.
- #define ergo_verbose_common(_tag_, _action_, _extra_format_,            \
+ #define ergo_verbose_common(_tag_, _action_, _extra_format_,          \
    _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
-   do { \
-     if (G1ErgoVerbose::enabled((_tag_))) { \
-       gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_), \
-                              os::elapsedTime(), \
-                              G1ErgoVerbose::to_string((_tag_)), \
-                              (_arg0_), (_arg1_), (_arg2_), \
-                              (_arg3_), (_arg4_), (_arg5_)); \
-     } \
+   do { \
+     if (G1ErgoVerbose::enabled((_tag_))) { \
+       gclog_or_tty->print_cr(ergo_format(_extra_format_), \
+                              os::elapsedTime(), \
+                              G1ErgoVerbose::to_string((_tag_)), \
+                              (_action_), \
+                              (_arg0_), (_arg1_), (_arg2_), \
+                              (_arg3_), (_arg4_), (_arg5_)); \
+     } \
  } while (0)
- #define ergo_verbose(_tag_, _action_) \
+ #define ergo_verbose(_tag_, _action_) \
ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)
#define ergo_verbose0(_tag_, _action_, _extra_format_) \
......
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -297,7 +297,23 @@
\
develop(uintx, G1DefaultMaxNewGenPercent, 80, \
"Percentage (0-100) of the heap size to use as maximum " \
"young gen size.")
"young gen size.") \
\
develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95, \
"Threshold for regions to be added to the collection set. " \
"Regions with more live bytes that this will not be collected.") \
\
develop(uintx, G1OldReclaimableThresholdPercent, 1, \
"Threshold for the remaining old reclaimable bytes, expressed " \
"as a percentage of the heap size. If the old reclaimable bytes " \
"are under this we will not collect them with more mixed GCs.") \
\
develop(uintx, G1MaxMixedGCNum, 4, \
"The maximum desired number of mixed GCs after a marking cycle.") \
\
develop(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \
"as a percentage of the heap size.")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
......
@@ -387,13 +387,12 @@ void HeapRegion::par_clear() {
ct_bs->clear(MemRegion(bottom(), end()));
}
- // <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
-   _gc_efficiency = (double) garbage_bytes() /
-                    g1h->predict_region_elapsed_time_ms(this, false);
+   G1CollectorPolicy* g1p = g1h->g1_policy();
+   _gc_efficiency = (double) reclaimable_bytes() /
+                    g1p->predict_region_elapsed_time_ms(this, false);
}
- // </PREDICTION>
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
assert(!isHumongous(), "sanity / pre-condition");
......
@@ -415,6 +415,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
return used_at_mark_start_bytes - marked_bytes();
}
+   // Return the amount of bytes we'll reclaim if we collect this
+   // region. This includes not only the known garbage bytes in the
+   // region but also any unallocated space in it, i.e., [top, end),
+   // since it will also be reclaimed if we collect the region.
+   size_t reclaimable_bytes() {
+     size_t known_live_bytes = live_bytes();
+     assert(known_live_bytes <= capacity(), "sanity");
+     return capacity() - known_live_bytes;
+   }
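For a concrete reading: with a 1 MB region holding 200 KB of known live data, reclaimable_bytes() reports 1024 - 200 = 824 KB, whether or not the space above top() was ever allocated, since collecting the region frees its entire capacity apart from the evacuated live objects.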
// An upper bound on the number of live bytes in the region.
size_t max_live_bytes() { return used() - garbage_bytes(); }
@@ -648,10 +658,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
init_top_at_mark_start();
}
- // <PREDICTION>
void calc_gc_efficiency(void);
double gc_efficiency() { return _gc_efficiency;}
- // </PREDICTION>
bool is_young() const { return _young_type != NotYoung; }
bool is_survivor() const { return _young_type == Survivor; }
......