Commit 0c52c8e5 authored by tonyp

7145441: G1: collection set chooser-related cleanup

Summary: Cleanup of the CSet chooser class: standardize on uints for region num and indexes (instead of int, jint, etc.), make the method / field naming style more consistent, remove a lot of dead code.
Reviewed-by: johnc, brutisso
Parent 5e686237
@@ -29,102 +29,6 @@
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"
CSetChooserCache::CSetChooserCache() {
for (int i = 0; i < CacheLength; ++i)
_cache[i] = NULL;
clear();
}
void CSetChooserCache::clear() {
_occupancy = 0;
_first = 0;
for (int i = 0; i < CacheLength; ++i) {
HeapRegion *hr = _cache[i];
if (hr != NULL)
hr->set_sort_index(-1);
_cache[i] = NULL;
}
}
#ifndef PRODUCT
bool CSetChooserCache::verify() {
guarantee(false, "CSetChooserCache::verify(): don't call this any more");
int index = _first;
HeapRegion *prev = NULL;
for (int i = 0; i < _occupancy; ++i) {
guarantee(_cache[index] != NULL, "cache entry should not be empty");
HeapRegion *hr = _cache[index];
guarantee(!hr->is_young(), "should not be young!");
if (prev != NULL) {
guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
"cache should be correctly ordered");
}
guarantee(hr->sort_index() == get_sort_index(index),
"sort index should be correct");
index = trim_index(index + 1);
prev = hr;
}
for (int i = 0; i < (CacheLength - _occupancy); ++i) {
guarantee(_cache[index] == NULL, "cache entry should be empty");
index = trim_index(index + 1);
}
guarantee(index == _first, "we should have reached where we started from");
return true;
}
#endif // PRODUCT
void CSetChooserCache::insert(HeapRegion *hr) {
guarantee(false, "CSetChooserCache::insert(): don't call this any more");
assert(!is_full(), "cache should not be empty");
hr->calc_gc_efficiency();
int empty_index;
if (_occupancy == 0) {
empty_index = _first;
} else {
empty_index = trim_index(_first + _occupancy);
assert(_cache[empty_index] == NULL, "last slot should be empty");
int last_index = trim_index(empty_index - 1);
HeapRegion *last = _cache[last_index];
assert(last != NULL,"as the cache is not empty, last should not be empty");
while (empty_index != _first &&
last->gc_efficiency() < hr->gc_efficiency()) {
_cache[empty_index] = last;
last->set_sort_index(get_sort_index(empty_index));
empty_index = last_index;
last_index = trim_index(last_index - 1);
last = _cache[last_index];
}
}
_cache[empty_index] = hr;
hr->set_sort_index(get_sort_index(empty_index));
++_occupancy;
assert(verify(), "cache should be consistent");
}
HeapRegion *CSetChooserCache::remove_first() {
guarantee(false, "CSetChooserCache::remove_first(): "
"don't call this any more");
if (_occupancy > 0) {
assert(_cache[_first] != NULL, "cache should have at least one region");
HeapRegion *ret = _cache[_first];
_cache[_first] = NULL;
ret->set_sort_index(-1);
--_occupancy;
_first = trim_index(_first + 1);
assert(verify(), "cache should be consistent");
return ret;
} else {
return NULL;
}
}
// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// will cause regions with a lot of live objects and large RSets to
@@ -134,7 +38,7 @@ HeapRegion *CSetChooserCache::remove_first() {
// the ones we'll skip are ones with both large RSets and a lot of
// live objects, not the ones with just a lot of live objects if we
// ordered according to the amount of reclaimable bytes per region.
static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
if (hr1 == NULL) {
if (hr2 == NULL) {
return 0;
@@ -156,8 +60,8 @@ static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
}
}
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
return orderRegions(*hr1p, *hr2p);
static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
return order_regions(*hr1p, *hr2p);
}
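Taken together with the block comment above, the comparator's contract is: sort descending by GC efficiency, with NULL entries pushed to the back. A standalone sketch of that contract (Region is a stand-in type; the tie-breaking in the elided body is an assumption):

#include <cstddef>
// Sketch of the order_regions() contract: higher gc_efficiency sorts
// first, NULLs sort last. Illustrative only, not HotSpot code.
struct Region { double gc_efficiency; };
static int order_regions_sketch(const Region* hr1, const Region* hr2) {
  if (hr1 == NULL) return (hr2 == NULL) ? 0 : 1;           // NULLs to the back
  if (hr2 == NULL) return -1;
  if (hr1->gc_efficiency > hr2->gc_efficiency) return -1;  // descending order
  if (hr1->gc_efficiency < hr2->gc_efficiency) return 1;
  return 0;
}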
CollectionSetChooser::CollectionSetChooser() :
@@ -175,105 +79,74 @@ CollectionSetChooser::CollectionSetChooser() :
//
// Note: containing object is allocated on C heap since it is CHeapObj.
//
_markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
_regions((ResourceObj::set_allocation_type((address) &_regions,
ResourceObj::C_HEAP),
100), true /* C_Heap */),
_curr_index(0), _length(0),
_regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
_first_par_unreserved_idx(0) {
_regionLiveThresholdBytes =
_curr_index(0), _length(0), _first_par_unreserved_idx(0),
_region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
_region_live_threshold_bytes =
HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
}
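For concreteness, the threshold computation above works out as follows with 1 MB regions and a 90% live threshold (both values are assumptions for illustration):

#include <cstddef>
// Worked example of the live-threshold computation (values assumed):
const size_t grain_bytes = 1024 * 1024;    // assumed HeapRegion::GrainBytes
const size_t live_threshold_percent = 90;  // assumed flag value
const size_t threshold = grain_bytes * live_threshold_percent / 100;
// threshold == 943718 bytes: a region with more live data than this is
// never added to the chooser (see should_add() in the header).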
#ifndef PRODUCT
bool CollectionSetChooser::verify() {
guarantee(_length >= 0, err_msg("_length: %d", _length));
guarantee(0 <= _curr_index && _curr_index <= _length,
err_msg("_curr_index: %d _length: %d", _curr_index, _length));
int index = 0;
void CollectionSetChooser::verify() {
guarantee(_length <= regions_length(),
err_msg("_length: %u regions length: %u", _length, regions_length()));
guarantee(_curr_index <= _length,
err_msg("_curr_index: %u _length: %u", _curr_index, _length));
uint index = 0;
size_t sum_of_reclaimable_bytes = 0;
while (index < _curr_index) {
guarantee(_markedRegions.at(index) == NULL,
guarantee(regions_at(index) == NULL,
"all entries before _curr_index should be NULL");
index += 1;
}
HeapRegion *prev = NULL;
while (index < _length) {
HeapRegion *curr = _markedRegions.at(index++);
guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
int si = curr->sort_index();
HeapRegion *curr = regions_at(index++);
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->isHumongous(), "should not be humongous!");
guarantee(si > -1 && si == (index-1), "sort index invariant");
if (prev != NULL) {
guarantee(orderRegions(prev, curr) != 1,
guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
prev->gc_efficiency(), curr->gc_efficiency()));
}
sum_of_reclaimable_bytes += curr->reclaimable_bytes();
prev = curr;
}
guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
err_msg("reclaimable bytes inconsistent, "
"remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
_remainingReclaimableBytes, sum_of_reclaimable_bytes));
return true;
}
#endif
void CollectionSetChooser::fillCache() {
guarantee(false, "fillCache: don't call this any more");
while (!_cache.is_full() && (_curr_index < _length)) {
HeapRegion* hr = _markedRegions.at(_curr_index);
assert(hr != NULL,
err_msg("Unexpected NULL hr in _markedRegions at index %d",
_curr_index));
_curr_index += 1;
assert(!hr->is_young(), "should not be young!");
assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
_markedRegions.at_put(hr->sort_index(), NULL);
_cache.insert(hr);
assert(!_cache.is_empty(), "cache should not be empty");
}
assert(verify(), "cache should be consistent");
_remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
}
#endif // !PRODUCT
void CollectionSetChooser::sortMarkedHeapRegions() {
void CollectionSetChooser::sort_regions() {
// First trim any unused portion of the top in the parallel case.
if (_first_par_unreserved_idx > 0) {
if (G1PrintParCleanupStats) {
gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
_markedRegions.length(), _first_par_unreserved_idx);
}
assert(_first_par_unreserved_idx <= _markedRegions.length(),
assert(_first_par_unreserved_idx <= regions_length(),
"Or we didn't reserved enough length");
_markedRegions.trunc_to(_first_par_unreserved_idx);
}
_markedRegions.sort(orderRegions);
assert(_length <= _markedRegions.length(), "Requirement");
assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
"Testing _length");
assert(_length == _markedRegions.length() ||
_markedRegions.at(_length) == NULL, "Testing _length");
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
regions_trunc_to(_first_par_unreserved_idx);
}
for (int i = 0; i < _length; i++) {
assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
_markedRegions.at(i)->set_sort_index(i);
_regions.sort(order_regions);
assert(_length <= regions_length(), "Requirement");
#ifdef ASSERT
for (uint i = 0; i < _length; i++) {
assert(regions_at(i) != NULL, "Should be true by sorting!");
}
#endif // ASSERT
if (G1PrintRegionLivenessInfo) {
G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
for (int i = 0; i < _length; ++i) {
HeapRegion* r = _markedRegions.at(i);
for (uint i = 0; i < _length; ++i) {
HeapRegion* r = regions_at(i);
cl.doHeapRegion(r);
}
}
assert(verify(), "CSet chooser verification");
verify();
}
uint CollectionSetChooser::calcMinOldCSetLength() {
uint CollectionSetChooser::calc_min_old_cset_length() {
// The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions
// look expensive, we should add them to the CSet anyway to make
@@ -294,7 +167,7 @@ uint CollectionSetChooser::calcMinOldCSetLength() {
return (uint) result;
}
uint CollectionSetChooser::calcMaxOldCSetLength() {
uint CollectionSetChooser::calc_max_old_cset_length() {
// The max old CSet region bound is based on the threshold expressed
// as a percentage of the heap size. I.e., it should bound the
// number of old regions added to the CSet irrespective of how many
@@ -311,18 +184,18 @@ uint CollectionSetChooser::calcMaxOldCSetLength() {
return (uint) result;
}
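Both function bodies are elided above, but the comments pin the intent down: the minimum bound spreads the candidate list across the desired number of mixed GCs, and the maximum bound caps each CSet at a percentage of the heap. A hedged reconstruction of that arithmetic (the rounding details are assumptions, not shown in this diff):

// Sketch only; the real bodies are elided in the hunks above.
unsigned calc_min_old_cset_length_sketch(unsigned candidates,
                                         unsigned mixed_gc_count_target) {
  // Ceiling division: the candidate list should drain within
  // G1MixedGCCountTarget mixed GCs, even if some regions look expensive.
  return (candidates + mixed_gc_count_target - 1) / mixed_gc_count_target;
}
unsigned calc_max_old_cset_length_sketch(unsigned heap_region_num,
                                         unsigned threshold_percent) {
  // Cap old regions per CSet at G1OldCSetRegionThresholdPercent of the
  // total number of heap regions, regardless of pause-time predictions.
  return heap_region_num * threshold_percent / 100;
}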
void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->isHumongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_young(), "should not be young!");
_markedRegions.append(hr);
_regions.append(hr);
_length++;
_remainingReclaimableBytes += hr->reclaimable_bytes();
_remaining_reclaimable_bytes += hr->reclaimable_bytes();
hr->calc_gc_efficiency();
}
void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
uint chunkSize) {
void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
uint chunk_size) {
_first_par_unreserved_idx = 0;
uint n_threads = (uint) ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) {
@@ -335,56 +208,46 @@ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1U);
}
uint max_waste = n_threads * chunkSize;
// it should be aligned with respect to chunkSize
uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
assert(aligned_n_regions % chunkSize == 0, "should be aligned");
_markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
uint max_waste = n_threads * chunk_size;
// it should be aligned with respect to chunk_size
uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
assert(aligned_n_regions % chunk_size == 0, "should be aligned");
regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}
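The reservation sizing is plain round-up arithmetic: grow the array to n_regions rounded up to a multiple of chunk_size, plus one chunk of potential waste per worker. A worked example (all values assumed):

// Worked example of the reservation sizing (values assumed):
const unsigned n_regions_ex  = 1000;
const unsigned chunk_size_ex = 16;
const unsigned n_threads_ex  = 8;
const unsigned aligned_ex    = (n_regions_ex + chunk_size_ex - 1)
                               / chunk_size_ex * chunk_size_ex;  // 1008
const unsigned max_waste_ex  = n_threads_ex * chunk_size_ex;     // 128
const unsigned last_index_ex = aligned_ex + max_waste_ex - 1;    // 1135
// Growing the array out to index 1135 guarantees that every worker's
// claimed chunk in claim_array_chunk() stays inside the array.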
jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
// Don't do this assert because this can be called at a point
// where the loop up stream will not execute again but might
// try to claim more chunks (loop test has not been done yet).
// assert(_markedRegions.length() > _first_par_unreserved_idx,
// "Striding beyond the marked regions");
jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
assert(_markedRegions.length() > res + n_regions - 1,
uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
uint res = (uint) Atomic::add((jint) chunk_size,
(volatile jint*) &_first_par_unreserved_idx);
assert(regions_length() > res + chunk_size - 1,
"Should already have been expanded");
return res - n_regions;
return res - chunk_size;
}
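claim_array_chunk() is a classic fetch-and-add reservation: each worker atomically bumps a shared high-water mark by chunk_size and owns the indexes it just skipped over. The same idiom with C++11 atomics as a stand-in (note that fetch_add returns the old value, while HotSpot's Atomic::add returns the new one, which is why the code above subtracts chunk_size):

#include <atomic>
// Illustrative stand-in for the Atomic::add()-based claim above.
std::atomic<unsigned> first_par_unreserved_sketch(0);
unsigned claim_array_chunk_sketch(unsigned chunk_size) {
  // fetch_add returns the value before the addition, so the claimed
  // half-open range is [start, start + chunk_size).
  unsigned start = first_par_unreserved_sketch.fetch_add(chunk_size);
  return start;
}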
void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
assert(_markedRegions.at(index) == NULL, "precondition");
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
assert(regions_at(index) == NULL, "precondition");
assert(!hr->is_young(), "should not be young!");
_markedRegions.at_put(index, hr);
regions_at_put(index, hr);
hr->calc_gc_efficiency();
}
void CollectionSetChooser::updateTotals(jint region_num,
size_t reclaimable_bytes) {
void CollectionSetChooser::update_totals(uint region_num,
size_t reclaimable_bytes) {
// Only take the lock if we actually need to update the totals.
if (region_num > 0) {
assert(reclaimable_bytes > 0, "invariant");
// We could have just used atomics instead of taking the
// lock. However, we currently don't have an atomic add for size_t.
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_length += (int) region_num;
_remainingReclaimableBytes += reclaimable_bytes;
_length += region_num;
_remaining_reclaimable_bytes += reclaimable_bytes;
} else {
assert(reclaimable_bytes == 0, "invariant");
}
}
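The comment records a design choice: the totals are updated under ParGCRareEvent_lock only because HotSpot had no atomic add for size_t at the time. With a runtime that provides one, the same update could be lock-free; a sketch of that alternative (illustrative, not the patch's approach):

#include <atomic>
#include <cstddef>
std::atomic<unsigned> total_length_sketch(0);
std::atomic<size_t>   total_reclaimable_sketch(0);
void update_totals_sketch(unsigned region_num, size_t reclaimable_bytes) {
  if (region_num > 0) {
    // Two relaxed adds; a reader that needs a consistent pair would
    // still need coordination, another point in the lock's favor.
    total_length_sketch.fetch_add(region_num, std::memory_order_relaxed);
    total_reclaimable_sketch.fetch_add(reclaimable_bytes,
                                       std::memory_order_relaxed);
  }
}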
void CollectionSetChooser::clearMarkedHeapRegions() {
for (int i = 0; i < _markedRegions.length(); i++) {
HeapRegion* r = _markedRegions.at(i);
if (r != NULL) {
r->set_sort_index(-1);
}
}
_markedRegions.clear();
void CollectionSetChooser::clear() {
_regions.clear();
_curr_index = 0;
_length = 0;
_remainingReclaimableBytes = 0;
_remaining_reclaimable_bytes = 0;
};
@@ -28,77 +28,42 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"
class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
private:
enum {
CacheLength = 16
} PrivateConstants;
HeapRegion* _cache[CacheLength];
int _occupancy; // number of regions in cache
int _first; // (index of) "first" region in the cache
// adding CacheLength to deal with negative values
inline int trim_index(int index) {
return (index + CacheLength) % CacheLength;
}
inline int get_sort_index(int index) {
return -index-2;
}
inline int get_index(int sort_index) {
return -sort_index-2;
}
class CollectionSetChooser: public CHeapObj {
public:
CSetChooserCache(void);
GrowableArray<HeapRegion*> _regions;
inline int occupancy(void) { return _occupancy; }
inline bool is_full() { return _occupancy == CacheLength; }
inline bool is_empty() { return _occupancy == 0; }
// Unfortunately, GrowableArray uses ints for length and indexes. To
// avoid excessive casting in the rest of the class the following
// wrapper methods are provided that use uints.
void clear(void);
void insert(HeapRegion *hr);
HeapRegion *remove_first(void);
inline HeapRegion *get_first(void) {
return _cache[_first];
uint regions_length() { return (uint) _regions.length(); }
HeapRegion* regions_at(uint i) { return _regions.at((int) i); }
void regions_at_put(uint i, HeapRegion* hr) {
_regions.at_put((int) i, hr);
}
#ifndef PRODUCT
bool verify (void);
bool region_in_cache(HeapRegion *hr) {
int sort_index = hr->sort_index();
if (sort_index < -1) {
int index = get_index(sort_index);
guarantee(index < CacheLength, "should be within bounds");
return _cache[index] == hr;
} else
return 0;
void regions_at_put_grow(uint i, HeapRegion* hr) {
_regions.at_put_grow((int) i, hr);
}
#endif // PRODUCT
};
class CollectionSetChooser: public CHeapObj {
GrowableArray<HeapRegion*> _markedRegions;
void regions_trunc_to(uint i) { _regions.trunc_to((int) i); }
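This wrapper block is the heart of the typing cleanup: GrowableArray is int-indexed, so the class funnels every access through uint helpers and keeps all the casts in one place. The same pattern in miniature (std::vector standing in for GrowableArray):

#include <vector>
// uint facade over an int-indexed container, mirroring the patch.
class RegionArraySketch {
  std::vector<void*> _regions;  // stand-in for GrowableArray<HeapRegion*>
public:
  unsigned length() const          { return (unsigned) _regions.size(); }
  void* at(unsigned i) const       { return _regions.at((int) i); }
  void at_put(unsigned i, void* v) { _regions.at((int) i) = v; }
  // The int casts live here and nowhere else, so callers stay uint-clean.
};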
// The index of the next candidate old region to be considered for
// addition to the CSet.
int _curr_index;
uint _curr_index;
// The number of candidate old regions added to the CSet chooser.
int _length;
uint _length;
CSetChooserCache _cache;
jint _first_par_unreserved_idx;
// Keeps track of the start of the next array chunk to be claimed by
// parallel GC workers.
uint _first_par_unreserved_idx;
// If a region has more live bytes than this threshold, it will not
// be added to the CSet chooser and will not be a candidate for
// collection.
size_t _regionLiveThresholdBytes;
size_t _region_live_threshold_bytes;
// The sum of reclaimable bytes over all the regions in the CSet chooser.
size_t _remainingReclaimableBytes;
size_t _remaining_reclaimable_bytes;
public:
@@ -107,9 +72,9 @@ public:
HeapRegion* peek() {
HeapRegion* res = NULL;
if (_curr_index < _length) {
res = _markedRegions.at(_curr_index);
res = regions_at(_curr_index);
assert(res != NULL,
err_msg("Unexpected NULL hr in _markedRegions at index %d",
err_msg("Unexpected NULL hr in _regions at index %u",
_curr_index));
}
return res;
@@ -121,90 +86,71 @@ public:
void remove_and_move_to_next(HeapRegion* hr) {
assert(hr != NULL, "pre-condition");
assert(_curr_index < _length, "pre-condition");
assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
hr->set_sort_index(-1);
_markedRegions.at_put(_curr_index, NULL);
assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
assert(regions_at(_curr_index) == hr, "pre-condition");
regions_at_put(_curr_index, NULL);
assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
err_msg("remaining reclaimable bytes inconsistent "
"from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
hr->reclaimable_bytes(), _remainingReclaimableBytes));
_remainingReclaimableBytes -= hr->reclaimable_bytes();
hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
_remaining_reclaimable_bytes -= hr->reclaimable_bytes();
_curr_index += 1;
}
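peek() and remove_and_move_to_next() form the consumption protocol: the policy inspects the cheapest remaining candidate, decides whether it fits the pause, and only then removes it. A self-contained sketch of the calling pattern (fits_in_pause() and the stand-in types are invented for illustration):

// Stand-in types; not HotSpot code.
struct RegionS { double reclaimable; };
struct ChooserSketch {
  RegionS* regions[8];
  unsigned curr, len;
  RegionS* peek() { return (curr < len) ? regions[curr] : 0; }
  void remove_and_move_to_next(RegionS* r) { regions[curr++] = 0; (void) r; }
};
static bool fits_in_pause(RegionS* r) { return r->reclaimable > 0.0; } // hypothetical
static void drain_sketch(ChooserSketch* c) {
  for (RegionS* r = c->peek(); r != 0; r = c->peek()) {
    if (!fits_in_pause(r)) break;   // list is sorted by desirability; stop early
    c->remove_and_move_to_next(r);  // claim r for the collection set
  }
}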
CollectionSetChooser();
void sortMarkedHeapRegions();
void fillCache();
void sort_regions();
// Determine whether to add the given region to the CSet chooser or
// not. Currently, we skip humongous regions (we never add them to
// the CSet, we only reclaim them during cleanup) and regions whose
// live bytes are over the threshold.
bool shouldAdd(HeapRegion* hr) {
bool should_add(HeapRegion* hr) {
assert(hr->is_marked(), "pre-condition");
assert(!hr->is_young(), "should never consider young regions");
return !hr->isHumongous() &&
hr->live_bytes() < _regionLiveThresholdBytes;
hr->live_bytes() < _region_live_threshold_bytes;
}
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
uint calcMinOldCSetLength();
uint calc_min_old_cset_length();
// Calculate the maximum number of old regions we'll add to the CSet
// during a mixed GC.
uint calcMaxOldCSetLength();
uint calc_max_old_cset_length();
// Serial version.
void addMarkedHeapRegion(HeapRegion *hr);
void add_region(HeapRegion *hr);
// Must be called before calls to getParMarkedHeapRegionChunk.
// "n_regions" is the number of regions, "chunkSize" the chunk size.
void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize);
// Returns the first index in a contiguous chunk of "n_regions" indexes
// Must be called before calls to claim_array_chunk().
// n_regions is the number of regions, chunk_size the chunk size.
void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
// Returns the first index in a contiguous chunk of chunk_size indexes
// that the calling thread has reserved. These must be set by the
// calling thread using "setMarkedHeapRegion" (to NULL if necessary).
jint getParMarkedHeapRegionChunk(jint n_regions);
// calling thread using set_region() (to NULL if necessary).
uint claim_array_chunk(uint chunk_size);
// Set the marked array entry at index to hr. Careful to claim the index
// first if in parallel.
void setMarkedHeapRegion(jint index, HeapRegion* hr);
void set_region(uint index, HeapRegion* hr);
// Atomically increment the number of added regions by region_num
// and the amount of reclaimable bytes by reclaimable_bytes.
void updateTotals(jint region_num, size_t reclaimable_bytes);
void update_totals(uint region_num, size_t reclaimable_bytes);
void clearMarkedHeapRegions();
void clear();
// Return the number of candidate regions that remain to be collected.
uint remainingRegions() { return (uint) (_length - _curr_index); }
uint remaining_regions() { return _length - _curr_index; }
// Determine whether the CSet chooser has more candidate regions or not.
bool isEmpty() { return remainingRegions() == 0; }
bool is_empty() { return remaining_regions() == 0; }
// Return the reclaimable bytes that remain to be collected on
// all the candidate regions in the CSet chooser.
size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
// Returns true if the used portion of "_markedRegions" is properly
// Returns true if the used portion of "_regions" is properly
// sorted, otherwise asserts false.
#ifndef PRODUCT
bool verify(void);
bool regionProperlyOrdered(HeapRegion* r) {
int si = r->sort_index();
if (si > -1) {
guarantee(_curr_index <= si && si < _length,
err_msg("curr: %d sort index: %d: length: %d",
_curr_index, si, _length));
guarantee(_markedRegions.at(si) == r,
err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
si, _markedRegions.at(si), r));
} else {
guarantee(si == -1, err_msg("sort index: %d", si));
}
return true;
}
#endif
void verify() PRODUCT_RETURN;
};
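The new verify() declaration relies on HotSpot's PRODUCT_RETURN idiom: in product builds the macro supplies an empty inline body, in debug builds it expands to nothing so the out-of-line definition in the .cpp (inside #ifndef PRODUCT) is used. Roughly, paraphrased from HotSpot's globalDefinitions.hpp:

#ifdef PRODUCT
#define PRODUCT_RETURN {}  // product: verify() becomes an empty inline body
#else
#define PRODUCT_RETURN     // debug: plain declaration; defined in the .cpp
#endif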
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
@@ -1192,11 +1192,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
BitMap* _region_bm;
BitMap* _card_bm;
// Debugging
size_t _tot_words_done;
size_t _tot_live;
size_t _tot_used;
size_t _region_marked_bytes;
intptr_t _bottom_card_num;
@@ -1215,9 +1210,7 @@ public:
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
_bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
_region_marked_bytes(0), _tot_words_done(0),
_tot_live(0), _tot_used(0),
_bottom_card_num(cm->heap_bottom_card_num()) { }
_region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
// It takes a region that's not empty (i.e., it has at least one
// live object in it and sets its corresponding bit on the region
@@ -1262,9 +1255,6 @@ public:
"start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
start, nextTop, hr->end()));
// Record the number of words we'll examine.
size_t words_done = (nextTop - start);
// Find the first marked object at or after "start".
start = _bm->getNextMarkedWordAddress(start, nextTop);
@@ -1343,19 +1333,10 @@ public:
// it can be queried by a calling verification routine
_region_marked_bytes = marked_bytes;
_tot_live += hr->next_live_bytes();
_tot_used += hr->used();
_tot_words_done = words_done;
return false;
}
size_t region_marked_bytes() const { return _region_marked_bytes; }
// Debugging
size_t tot_words_done() const { return _tot_words_done; }
size_t tot_live() const { return _tot_live; }
size_t tot_used() const { return _tot_used; }
};
// Heap region closure used for verifying the counting data
@@ -1574,10 +1555,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
BitMap* _region_bm;
BitMap* _card_bm;
size_t _total_live_bytes;
size_t _total_used_bytes;
size_t _total_words_done;
void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");
@@ -1621,8 +1598,7 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
FinalCountDataUpdateClosure(ConcurrentMark* cm,
BitMap* region_bm,
BitMap* card_bm) :
_cm(cm), _region_bm(region_bm), _card_bm(card_bm),
_total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
bool doHeapRegion(HeapRegion* hr) {
@@ -1644,8 +1620,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
assert(hr->bottom() <= start && start <= hr->end() &&
hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
size_t words_done = ntams - hr->bottom();
if (start < ntams) {
// Region was changed between remark and cleanup pauses
// We need to add (ntams - start) to the marked bytes
@@ -1676,16 +1650,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
set_bit_for_region(hr);
}
_total_words_done += words_done;
_total_used_bytes += hr->used();
_total_live_bytes += hr->next_marked_bytes();
return false;
}
size_t total_words_done() const { return _total_words_done; }
size_t total_live_bytes() const { return _total_live_bytes; }
size_t total_used_bytes() const { return _total_used_bytes; }
};
class G1ParFinalCountTask: public AbstractGangTask {
@@ -1697,9 +1663,6 @@ protected:
uint _n_workers;
size_t *_live_bytes;
size_t *_used_bytes;
public:
G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
: AbstractGangTask("G1 final counting"),
@@ -1707,8 +1670,7 @@ public:
_actual_region_bm(region_bm), _actual_card_bm(card_bm),
_n_workers(0) {
// Use the value already set as the number of active threads
// in the call to run_task(). Needed for the allocation of
// _live_bytes and _used_bytes.
// in the call to run_task().
if (G1CollectedHeap::use_parallel_gc_threads()) {
assert( _g1h->workers()->active_workers() > 0,
"Should have been previously set");
@@ -1716,14 +1678,6 @@ public:
} else {
_n_workers = 1;
}
_live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
_used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
}
~G1ParFinalCountTask() {
FREE_C_HEAP_ARRAY(size_t, _live_bytes);
FREE_C_HEAP_ARRAY(size_t, _used_bytes);
}
void work(uint worker_id) {
@@ -1741,23 +1695,6 @@ public:
} else {
_g1h->heap_region_iterate(&final_update_cl);
}
_live_bytes[worker_id] = final_update_cl.total_live_bytes();
_used_bytes[worker_id] = final_update_cl.total_used_bytes();
}
size_t live_bytes() {
size_t live_bytes = 0;
for (uint i = 0; i < _n_workers; ++i)
live_bytes += _live_bytes[i];
return live_bytes;
}
size_t used_bytes() {
size_t used_bytes = 0;
for (uint i = 0; i < _n_workers; ++i)
used_bytes += _used_bytes[i];
return used_bytes;
}
};
@@ -1892,15 +1829,6 @@ public:
HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
}
double end = os::elapsedTime();
if (G1PrintParCleanupStats) {
gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
"claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
worker_id, start, end, (end-start)*1000.0,
g1_note_end.regions_claimed(),
g1_note_end.claimed_region_time_sec()*1000.0,
g1_note_end.max_region_time_sec()*1000.0);
}
}
size_t max_live_bytes() { return _max_live_bytes; }
size_t freed_bytes() { return _freed_bytes; }
@@ -2011,29 +1939,11 @@ void ConcurrentMark::cleanup() {
guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
}
size_t known_garbage_bytes =
g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
g1p->set_known_garbage_bytes(known_garbage_bytes);
size_t start_used_bytes = g1h->used();
g1h->set_marking_complete();
ergo_verbose4(ErgoConcCycles,
"finish cleanup",
ergo_format_byte("occupancy")
ergo_format_byte("capacity")
ergo_format_byte_perc("known garbage"),
start_used_bytes, g1h->capacity(),
known_garbage_bytes,
((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
double count_end = os::elapsedTime();
double this_final_counting_time = (count_end - start);
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr("Cleanup:");
gclog_or_tty->print_cr(" Finalize counting: %8.3f ms",
this_final_counting_time*1000.0);
}
_total_counting_time += this_final_counting_time;
if (G1PrintRegionLivenessInfo) {
@@ -2047,7 +1957,6 @@ void ConcurrentMark::cleanup() {
g1h->reset_gc_time_stamp();
// Note end of marking in all heap regions.
double note_end_start = os::elapsedTime();
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
if (G1CollectedHeap::use_parallel_gc_threads()) {
g1h->set_par_threads((int)n_workers);
@@ -2066,11 +1975,6 @@ void ConcurrentMark::cleanup() {
// regions that there will be more free regions coming soon.
g1h->set_free_regions_coming();
}
double note_end_end = os::elapsedTime();
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
(note_end_end - note_end_start)*1000.0);
}
// call below, since it affects the metric by which we sort the heap
// regions.
@@ -2109,9 +2013,6 @@ void ConcurrentMark::cleanup() {
g1h->capacity());
}
size_t cleaned_up_bytes = start_used_bytes - g1h->used();
g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
// Clean up will have freed any regions completely full of garbage.
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
@@ -4064,7 +4064,6 @@ void G1CollectedHeap::finalize_for_evac_failure() {
void G1CollectedHeap::remove_self_forwarding_pointers() {
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
@@ -4082,7 +4081,6 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
reset_cset_heap_region_claim_values();
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
// Now restore saved marks, if any.
if (_objs_with_preserved_marks != NULL) {
@@ -192,11 +192,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_in_marking_window(false),
_in_marking_window_im(false),
_known_garbage_ratio(0.0),
_known_garbage_bytes(0),
_young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
_recent_prev_end_times_for_all_gcs_sec(
new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -868,8 +863,6 @@ void G1CollectorPolicy::record_full_collection_end() {
_last_young_gc = false;
clear_initiate_conc_mark_if_possible();
clear_during_initial_mark_pause();
_known_garbage_bytes = 0;
_known_garbage_ratio = 0.0;
_in_marking_window = false;
_in_marking_window_im = false;
@@ -882,7 +875,7 @@ void G1CollectorPolicy::record_full_collection_end() {
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
update_young_list_target_length();
_collectionSetChooser->clearMarkedHeapRegions();
_collectionSetChooser->clear();
}
void G1CollectorPolicy::record_stop_world_start() {
@@ -1456,16 +1449,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
}
}
// Update the efficiency-since-mark vars.
double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
if (elapsed_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
// We'll just set it to something (arbitrarily) small.
proc_ms = 1.0;
}
double cur_efficiency = (double) freed_bytes / proc_ms;
bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
@@ -1500,10 +1483,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
}
}
if (_last_gc_was_young && !_during_marking) {
_young_gc_eff_seq->add(cur_efficiency);
}
_short_lived_surv_rate_group->start_adding_regions();
// do that for any other surv rate groups
@@ -1618,7 +1597,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
_collectionSetChooser->verify();
}
#define EXT_SIZE_FORMAT "%d%s"
@@ -2065,28 +2044,6 @@ void G1CollectorPolicy::update_survivors_policy() {
HeapRegion::GrainWords * _max_survivor_regions);
}
#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
CollectionSetChooser* _chooser;
public:
HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
_chooser(chooser) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
}
return false;
}
};
bool G1CollectorPolicy::assertMarkedBytesDataOK() {
HRSortIndexIsOKClosure cl(_collectionSetChooser);
_g1->heap_region_iterate(&cl);
return true;
}
#endif
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
GCCause::Cause gc_cause) {
bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
@@ -2184,8 +2141,8 @@ public:
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
_hrSorted->addMarkedHeapRegion(r);
if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
_hrSorted->add_region(r);
}
}
return false;
@@ -2195,16 +2152,14 @@ public:
class ParKnownGarbageHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CollectionSetChooser* _hrSorted;
jint _marked_regions_added;
uint _marked_regions_added;
size_t _reclaimable_bytes_added;
jint _chunk_size;
jint _cur_chunk_idx;
jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
int _worker;
int _invokes;
uint _chunk_size;
uint _cur_chunk_idx;
uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
void get_new_chunk() {
_cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
_cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
_cur_chunk_end = _cur_chunk_idx + _chunk_size;
}
void add_region(HeapRegion* r) {
@@ -2212,7 +2167,7 @@ class ParKnownGarbageHRClosure: public HeapRegionClosure {
get_new_chunk();
}
assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
_hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
_hrSorted->set_region(_cur_chunk_idx, r);
_marked_regions_added++;
_reclaimable_bytes_added += r->reclaimable_bytes();
_cur_chunk_idx++;
@@ -2220,78 +2175,55 @@ class ParKnownGarbageHRClosure: public HeapRegionClosure {
public:
ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
jint chunk_size,
int worker) :
uint chunk_size) :
_g1h(G1CollectedHeap::heap()),
_hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_marked_regions_added(0), _reclaimable_bytes_added(0),
_cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
_cur_chunk_idx(0), _cur_chunk_end(0) { }
bool doHeapRegion(HeapRegion* r) {
// We only include humongous regions in collection
// sets when concurrent mark shows that their contained object is
// unreachable.
_invokes++;
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
add_region(r);
}
}
return false;
}
jint marked_regions_added() { return _marked_regions_added; }
uint marked_regions_added() { return _marked_regions_added; }
size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
int invokes() { return _invokes; }
};
class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
jint _chunk_size;
uint _chunk_size;
G1CollectedHeap* _g1;
public:
ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()) { }
void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
_chunk_size,
worker_id);
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
// Back to zero for the claim value.
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
_g1->workers()->active_workers(),
HeapRegion::InitialClaimValue);
jint regions_added = parKnownGarbageCl.marked_regions_added();
uint regions_added = parKnownGarbageCl.marked_regions_added();
size_t reclaimable_bytes_added =
parKnownGarbageCl.reclaimable_bytes_added();
_hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
worker_id, parKnownGarbageCl.invokes(), regions_added);
}
_hrSorted->update_totals(regions_added, reclaimable_bytes_added);
}
};
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
double start_sec;
if (G1PrintParCleanupStats) {
start_sec = os::elapsedTime();
}
_collectionSetChooser->clearMarkedHeapRegions();
double clear_marked_end_sec;
if (G1PrintParCleanupStats) {
clear_marked_end_sec = os::elapsedTime();
gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
(clear_marked_end_sec - start_sec) * 1000.0);
}
_collectionSetChooser->clear();
uint region_num = _g1->n_regions();
if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -2314,8 +2246,8 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
MinWorkUnit);
}
_collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
WorkUnit);
_collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
(int) WorkUnit);
_g1->workers()->run_task(&parKnownGarbageTask);
@@ -2326,20 +2258,10 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
_g1->heap_region_iterate(&knownGarbagecl);
}
double known_garbage_end_sec;
if (G1PrintParCleanupStats) {
known_garbage_end_sec = os::elapsedTime();
gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
(known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
}
_collectionSetChooser->sortMarkedHeapRegions();
double end_sec = os::elapsedTime();
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" sorting: %8.3f ms.",
(end_sec - known_garbage_end_sec) * 1000.0);
}
_collectionSetChooser->sort_regions();
double end_sec = os::elapsedTime();
double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
_cur_mark_stop_world_time_ms += elapsed_time_ms;
@@ -2555,13 +2477,13 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
if (cset_chooser->isEmpty()) {
if (cset_chooser->is_empty()) {
ergo_verbose0(ErgoMixedGCs,
false_action_str,
ergo_format_reason("candidate old regions not available"));
return false;
}
size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
size_t capacity_bytes = _g1->capacity();
double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
double threshold = (double) G1HeapWastePercent;
@@ -2572,7 +2494,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
cset_chooser->remainingRegions(),
cset_chooser->remaining_regions(),
reclaimable_bytes, perc, threshold);
return false;
}
@@ -2583,7 +2505,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
cset_chooser->remainingRegions(),
cset_chooser->remaining_regions(),
reclaimable_bytes, perc, threshold);
return true;
}
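The mixed-GC decision above is a single percentage test against G1HeapWastePercent. A worked example (heap size and flag value are assumptions for illustration):

#include <cstdio>
int main() {
  double capacity_bytes    = 10.0 * 1024 * 1024 * 1024;  // assumed 10 GB heap
  double reclaimable_bytes = 300.0 * 1024 * 1024;        // from the chooser
  double threshold         = 5.0;                        // assumed G1HeapWastePercent
  double perc = reclaimable_bytes * 100.0 / capacity_bytes;
  // perc is about 2.93, which is not above the threshold, so mixed GCs
  // stop and the remaining candidates are left for a later cycle.
  printf("reclaimable = %.2f%% of heap -> %s\n", perc,
         perc > threshold ? "continue mixed GCs" : "do not start mixed GCs");
  return 0;
}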
@@ -2666,9 +2588,9 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
if (!gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
assert(cset_chooser->verify(), "CSet Chooser verification - pre");
const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength();
const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
cset_chooser->verify();
const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
uint expensive_region_num = 0;
bool check_time_remaining = adaptive_young_list_length();
@@ -2755,7 +2677,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
time_remaining_ms);
}
assert(cset_chooser->verify(), "CSet Chooser verification - post");
cset_chooser->verify();
}
stop_incremental_cset_building();
@@ -288,8 +288,6 @@ private:
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _young_gc_eff_seq;
G1YoungGenSizer* _young_gen_sizer;
uint _eden_cset_region_length;
@@ -315,9 +313,6 @@ private:
size_t _rs_lengths_prediction;
size_t _known_garbage_bytes;
double _known_garbage_ratio;
double sigma() { return _sigma; }
// A function that prevents us putting too much stock in small sample
@@ -509,10 +504,6 @@ public:
_recorded_non_young_free_cset_time_ms = time_ms;
}
double predict_young_gc_eff() {
return get_new_neg_prediction(_young_gc_eff_seq);
}
double predict_survivor_regions_evac_time();
void cset_regions_freed() {
@@ -522,20 +513,6 @@ public:
// also call it on any more surv rate groups
}
void set_known_garbage_bytes(size_t known_garbage_bytes) {
_known_garbage_bytes = known_garbage_bytes;
size_t heap_bytes = _g1->capacity();
_known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
}
void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
_known_garbage_bytes -= known_garbage_bytes;
size_t heap_bytes = _g1->capacity();
_known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
}
G1MMUTracker* mmu_tracker() {
return _mmu_tracker;
}
@@ -1026,12 +1003,6 @@ public:
// exceeded the desired limit, return an amount to expand by.
size_t expansion_amount();
#ifndef PRODUCT
// Check any appropriate marked bytes info, asserting false if
// something's wrong, else returning "true".
bool assertMarkedBytesDataOK();
#endif
// Print tracing information.
void print_tracing_info() const;
@@ -1074,19 +1045,6 @@ public:
return _young_gen_sizer->adaptive_young_list_length();
}
inline double get_gc_eff_factor() {
double ratio = _known_garbage_ratio;
double square = ratio * ratio;
// square = square * square;
double ret = square * 9.0 + 1.0;
#if 0
gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
guarantee(0.0 <= ret && ret < 10.0, "invariant!");
return ret;
}
private:
//
// Survivor regions policy.
@@ -127,9 +127,6 @@
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
\
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
@@ -370,7 +370,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
_claimed = InitialClaimValue;
}
zero_marked_bytes();
set_sort_index(-1);
_offsets.resize(HeapRegion::GrainWords);
init_top_at_mark_start();
@@ -491,8 +490,7 @@ HeapRegion::HeapRegion(uint hrs_index,
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
_gc_efficiency(0.0),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
@@ -281,12 +281,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
// See "sort_index" method. -1 means is not in the array.
int _sort_index;
// <PREDICTION>
// The calculated GC efficiency of the region.
double _gc_efficiency;
// </PREDICTION>
enum YoungType {
NotYoung, // a region is not young
@@ -629,16 +625,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// last mark phase ended.
bool is_marked() { return _prev_top_at_mark_start != bottom(); }
// If "is_marked()" is true, then this is the index of the region in
// an array constructed at the end of marking of the regions in a
// "desirability" order.
int sort_index() {
return _sort_index;
}
void set_sort_index(int i) {
_sort_index = i;
}
void init_top_at_conc_mark_count() {
_top_at_conc_mark_count = bottom();
}