Commit c56ce949 authored by J jmasa

8008508: CMS does not correctly reduce heap size after a Full GC

Reviewed-by: johnc, ysr
Parent 86a31080
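Summary of the change: after a full (compacting) collection the CMS old generation previously only ever expanded; it now delegates to the shared CardGeneration::compute_new_size(), which can also shrink, while the original free-list based sizing is kept for concurrent cycles as compute_new_size_free_list(). Below is a condensed, compilable sketch using stub types (not the HotSpot sources) of the two paths that the hunks in this commit wire up.

// Condensed sketch with stub types; not the HotSpot sources.
struct CardGeneration           { virtual void compute_new_size() {} virtual ~CardGeneration() {} };
struct CompactibleFreeListSpace { void reset_after_compaction() {} };

struct ConcurrentMarkSweepGeneration : CardGeneration {
  CompactibleFreeListSpace space;
  CompactibleFreeListSpace* cmsSpace() { return &space; }

  // After a full (compacting) GC: reuse the generic grow/shrink policy, then
  // re-initialize the free-list space, which the compaction invalidated.
  void compute_new_size() override {
    CardGeneration::compute_new_size();
    cmsSpace()->reset_after_compaction();
  }

  // After a concurrent collection: keep the original free-list based sizing.
  void compute_new_size_free_list() { /* expand or shrink from free-list statistics */ }
};

int main() {
  ConcurrentMarkSweepGeneration gen;
  gen.compute_new_size();            // path taken by the full-GC code
  gen.compute_new_size_free_list();  // path taken by CMSCollector::compute_new_size()
  return 0;
}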
@@ -48,6 +48,7 @@
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
@@ -916,7 +917,31 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
return;
}
size_t expand_bytes = 0;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
CardGeneration::compute_new_size();
// Reset again after a possible resizing
cmsSpace()->reset_after_compaction();
assert(used() == used_after_gc && used_after_gc <= capacity(),
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}
void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
assert_locked_or_safepoint(Heap_lock);
// If incremental collection failed, we just want to expand
// to the limit.
if (incremental_collection_failed()) {
clear_incremental_collection_failed();
grow_to_reserved();
return;
}
double free_percentage = ((double) free()) / capacity();
double desired_free_percentage = (double) MinHeapFreeRatio / 100;
double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
@@ -925,9 +950,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
if (free_percentage < desired_free_percentage) {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
assert(desired_capacity >= capacity(), "invalid expansion size");
expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
}
if (expand_bytes > 0) {
if (PrintGCDetails && Verbose) {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
@@ -961,6 +984,14 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
}
} else {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
assert(desired_capacity <= capacity(), "invalid expansion size");
size_t shrink_bytes = capacity() - desired_capacity;
// Don't shrink unless the delta is greater than the minimum shrink we want
if (shrink_bytes >= MinHeapDeltaBytes) {
shrink_free_list_by(shrink_bytes);
}
}
}
@@ -1872,7 +1903,7 @@ void CMSCollector::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
FreelistLocker z(this);
MetaspaceGC::compute_new_size();
_cmsGen->compute_new_size();
_cmsGen->compute_new_size_free_list();
}
// A work method used by foreground collection to determine
@@ -2601,6 +2632,10 @@ void CMSCollector::gc_prologue(bool full) {
}
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
_capacity_at_prologue = capacity();
_used_at_prologue = used();
// Delegate to CMScollector which knows how to coordinate between
// this and any other CMS generations that it is responsible for
// collecting.
@@ -3300,6 +3335,26 @@ bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
}
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
assert_locked_or_safepoint(ExpandHeap_lock);
// Shrink committed space
_virtual_space.shrink_by(bytes);
// Shrink space; this also shrinks the space's BOT
_cmsSpace->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(_cmsSpace->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(_cmsSpace->bottom(), new_word_size);
// Shrink the card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
}
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
@@ -3351,7 +3406,7 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
return success;
}
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
// XXX Fix when compaction is implemented.
@@ -9074,51 +9129,6 @@ void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
}
}
// The desired expansion delta is computed so that:
// . desired free percentage or greater is used
void ASConcurrentMarkSweepGeneration::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
// If incremental collection failed, we just want to expand
// to the limit.
if (incremental_collection_failed()) {
clear_incremental_collection_failed();
grow_to_reserved();
return;
}
assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
int prev_level = level() - 1;
assert(prev_level >= 0, "The cms generation is the lowest generation");
Generation* prev_gen = gch->get_gen(prev_level);
assert(prev_gen->kind() == Generation::ASParNew,
"Wrong type of young generation");
ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
size_t cur_eden = younger_gen->eden()->capacity();
CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
size_t cur_promo = free();
size_policy->compute_tenured_generation_free_space(cur_promo,
max_available(),
cur_eden);
resize(cur_promo, size_policy->promo_size());
// Record the new size of the space in the cms generation
// that is available for promotions. This is temporary.
// It should be the desired promo size.
size_policy->avg_cms_promo()->sample(free());
size_policy->avg_old_live()->sample(used());
if (UsePerfData) {
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
counters->update_cms_capacity_counter(capacity());
}
}
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
......
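Worked example (hypothetical numbers, not from the sources) of the new shrink decision in compute_new_size_free_list(): with MinHeapFreeRatio assumed to be 40, 300M of live data and 1000M of committed capacity, the desired capacity is 300M / (1 - 0.40) = 500M, so the generation gives back 500M via shrink_free_list_by(), provided the delta is at least MinHeapDeltaBytes.

#include <cstddef>
#include <cstdio>

int main() {
  const double desired_free_percentage = 40 / 100.0;        // assumed MinHeapFreeRatio
  const size_t used           = 300u << 20;                  // assumed live data after the cycle
  const size_t capacity       = 1000u << 20;                 // assumed committed size
  const size_t min_heap_delta = 128u << 10;                  // assumed MinHeapDeltaBytes
  size_t desired_capacity = (size_t)(used / (1.0 - desired_free_percentage));
  if (desired_capacity <= capacity) {                        // enough free space: consider shrinking
    size_t shrink_bytes = capacity - desired_capacity;
    if (shrink_bytes >= min_heap_delta) {                    // only shrink by a significant amount
      printf("shrink_free_list_by(%zuM)\n", shrink_bytes >> 20);  // prints 500M
    }
  }
  return 0;
}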
@@ -60,6 +60,7 @@ class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
@@ -810,9 +811,6 @@ class CMSCollector: public CHeapObj<mtGC> {
// used regions of each generation to limit the extent of sweep
void save_sweep_limits();
// Resize the generations included in the collector.
void compute_new_size();
// A work method used by foreground collection to determine
// what type of collection (compacting or not, continuing or fresh)
// it should do.
@@ -909,6 +907,9 @@ class CMSCollector: public CHeapObj<mtGC> {
void releaseFreelistLocks() const;
bool haveFreelistLocks() const;
// Adjust size of underlying generation
void compute_new_size();
// GC prologue and epilogue
void gc_prologue(bool full);
void gc_epilogue(bool full);
@@ -1082,7 +1083,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
protected:
// Shrink generation by specified size (returns false if unable to shrink)
virtual void shrink_by(size_t bytes);
void shrink_free_list_by(size_t bytes);
// Update statistics for GC
virtual void update_gc_stats(int level, bool full);
@@ -1233,6 +1234,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
CMSExpansionCause::Cause cause);
virtual bool expand(size_t bytes, size_t expand_bytes);
void shrink(size_t bytes);
void shrink_by(size_t bytes);
HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
bool expand_and_ensure_spooling_space(PromotionInfo* promo);
@@ -1293,7 +1295,13 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
bool must_be_youngest() const { return false; }
bool must_be_oldest() const { return true; }
void compute_new_size();
// Resize the generation after a compacting GC. The
// generation can be treated as a contiguous space
// after the compaction.
virtual void compute_new_size();
// Resize the generation after a non-compacting
// collection.
void compute_new_size_free_list();
CollectionTypes debug_collection_type() { return _debug_collection_type; }
void rotate_debug_collection_type();
@@ -1315,7 +1323,6 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
virtual void shrink_by(size_t bytes);
public:
virtual void compute_new_size();
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
......
@@ -382,7 +382,9 @@ void Generation::compact() {
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
int level,
GenRemSet* remset) :
Generation(rs, initial_byte_size, level), _rs(remset)
Generation(rs, initial_byte_size, level), _rs(remset),
_shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue()
{
HeapWord* start = (HeapWord*)rs.base();
size_t reserved_byte_size = rs.size();
@@ -406,6 +408,9 @@ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
}
_min_heap_delta_bytes = MinHeapDeltaBytes;
_capacity_at_prologue = initial_byte_size;
_used_at_prologue = 0;
}
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
@@ -457,6 +462,160 @@ void CardGeneration::invalidate_remembered_set() {
}
void CardGeneration::compute_new_size() {
assert(_shrink_factor <= 100, "invalid shrink factor");
size_t current_shrink_factor = _shrink_factor;
_shrink_factor = 0;
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
const double min_tmp = used_after_gc / maximum_used_percentage;
size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
// Don't shrink less than the initial generation size
minimum_desired_capacity = MAX2(minimum_desired_capacity,
spec()->init_size());
assert(used_after_gc <= minimum_desired_capacity, "sanity check");
if (PrintGC && Verbose) {
const size_t free_after_gc = free();
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
gclog_or_tty->print_cr(" "
" minimum_free_percentage: %6.2f"
" maximum_used_percentage: %6.2f",
minimum_free_percentage,
maximum_used_percentage);
gclog_or_tty->print_cr(" "
" free_after_gc : %6.1fK"
" used_after_gc : %6.1fK"
" capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
gclog_or_tty->print_cr(" "
" free_percentage: %6.2f",
free_percentage);
}
if (capacity_after_gc < minimum_desired_capacity) {
// If we have less free space than we want then expand
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
expand(expand_bytes, 0); // safe if expansion fails
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" expanding:"
" minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK"
" _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
}
return;
}
// No expansion, now see if we want to shrink
size_t shrink_bytes = 0;
// We would never want to shrink more than this
size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
if (MaxHeapFreeRatio < 100) {
const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const double max_tmp = used_after_gc / minimum_used_percentage;
size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
maximum_desired_capacity = MAX2(maximum_desired_capacity,
spec()->init_size());
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" maximum_free_percentage: %6.2f"
" minimum_used_percentage: %6.2f",
maximum_free_percentage,
minimum_used_percentage);
gclog_or_tty->print_cr(" "
" _capacity_at_prologue: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
}
assert(minimum_desired_capacity <= maximum_desired_capacity,
"sanity check");
if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
shrink_bytes = capacity_after_gc - maximum_desired_capacity;
// We don't want shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (current_shrink_factor == 0) {
_shrink_factor = 10;
} else {
_shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" shrinking:"
" initSize: %.1fK"
" maximum_desired_capacity: %.1fK",
spec()->init_size() / (double) K,
maximum_desired_capacity / (double) K);
gclog_or_tty->print_cr(" "
" shrink_bytes: %.1fK"
" current_shrink_factor: %d"
" new shrink factor: %d"
" _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
current_shrink_factor,
_shrink_factor,
_min_heap_delta_bytes / (double) K);
}
}
}
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
// We have two shrinking computations, take the largest
shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" aggressive shrinking:"
" _capacity_at_prologue: %.1fK"
" capacity_after_gc: %.1fK"
" expansion_for_promotion: %.1fK"
" shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
shrink(shrink_bytes);
}
}
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
......
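Illustration (standalone program, not HotSpot code) of the shrink-factor damping that CardGeneration::compute_new_size() inherits from the old TenuredGeneration version, assuming each of four consecutive full GCs leaves 400M of capacity above maximum_desired_capacity: the generation gives back 0%, 10%, 40% and then 100% of that excess, and the factor resets to 0 as soon as a resize requires no shrinking.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t shrink_factor = 0;                 // mirrors _shrink_factor, reset whenever no shrink happens
  const size_t excess_bytes = 400u << 20;   // assumed capacity above maximum_desired_capacity
  for (int gc = 1; gc <= 4; gc++) {
    size_t current = shrink_factor;
    size_t shrink_bytes = excess_bytes / 100 * current;                  // damped give-back
    shrink_factor = (current == 0) ? 10 : std::min(current * 4, (size_t)100);
    printf("GC %d: shrink %zuM of 400M excess (next factor %zu%%)\n",
           gc, shrink_bytes >> 20, shrink_factor);                       // 0M, 40M, 160M, 400M
  }
  return 0;
}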
@@ -634,6 +634,17 @@ class CardGeneration: public Generation {
// This is local to this generation.
BlockOffsetSharedArray* _bts;
// current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
size_t _min_heap_delta_bytes; // Minimum amount to expand.
// Some statistics from before gc started.
// These are gathered in the gc_prologue (and should_collect)
// to control growing/shrinking policy in spite of promotions.
size_t _capacity_at_prologue;
size_t _used_at_prologue;
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
GenRemSet* remset);
@@ -644,6 +655,11 @@ class CardGeneration: public Generation {
// necessarily the full "bytes") was done.
virtual bool expand(size_t bytes, size_t expand_bytes);
// Shrink generation with specified size (returns false if unable to shrink)
virtual void shrink(size_t bytes) = 0;
virtual void compute_new_size();
virtual void clear_remembered_set();
virtual void invalidate_remembered_set();
@@ -667,7 +683,6 @@ class OneContigSpaceCardGeneration: public CardGeneration {
friend class VM_PopulateDumpSharedSpace;
protected:
size_t _min_heap_delta_bytes; // Minimum amount to expand.
ContiguousSpace* _the_space; // actual space holding objects
WaterMark _last_gc; // watermark between objects allocated before
// and after last GC.
@@ -688,11 +703,10 @@ class OneContigSpaceCardGeneration: public CardGeneration {
public:
OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
size_t min_heap_delta_bytes,
int level, GenRemSet* remset,
ContiguousSpace* space) :
CardGeneration(rs, initial_byte_size, level, remset),
_the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
_the_space(space)
{}
inline bool is_in(const void* p) const;
......
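Sketch (hypothetical numbers, standalone program) of how the _capacity_at_prologue bookkeeping declared above feeds the "take back promotion expansion" branch of the shared compute_new_size(): the prologue records the capacity, promotions during the collection expand the old generation, and if there is room after the GC that expansion is reclaimed. The scenario assumes the MaxHeapFreeRatio check requested no shrink of its own.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t capacity_at_prologue = 800u << 20;  // recorded in gc_prologue()
  const size_t capacity_after_gc    = 900u << 20;  // old gen grew ~100M for promotions
  const size_t max_shrink_bytes     = 300u << 20;  // capacity_after_gc - minimum_desired_capacity
  size_t shrink_bytes = 0;                         // assume MaxHeapFreeRatio asked for nothing
  if (capacity_after_gc > capacity_at_prologue) {
    size_t expansion_for_promotion =
        std::min(capacity_after_gc - capacity_at_prologue, max_shrink_bytes);
    shrink_bytes = std::max(shrink_bytes, expansion_for_promotion);   // take the larger request
  }
  printf("shrink(%zuM)\n", shrink_bytes >> 20);    // gives back the 100M promotion expansion
  return 0;
}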
@@ -39,7 +39,7 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size, int level,
GenRemSet* remset) :
OneContigSpaceCardGeneration(rs, initial_byte_size,
MinHeapDeltaBytes, level, remset, NULL)
level, remset, NULL)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
@@ -86,162 +86,6 @@ const char* TenuredGeneration::name() const {
return "tenured generation";
}
void TenuredGeneration::compute_new_size() {
assert(_shrink_factor <= 100, "invalid shrink factor");
size_t current_shrink_factor = _shrink_factor;
_shrink_factor = 0;
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
const double min_tmp = used_after_gc / maximum_used_percentage;
size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
// Don't shrink less than the initial generation size
minimum_desired_capacity = MAX2(minimum_desired_capacity,
spec()->init_size());
assert(used_after_gc <= minimum_desired_capacity, "sanity check");
if (PrintGC && Verbose) {
const size_t free_after_gc = free();
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
gclog_or_tty->print_cr(" "
" minimum_free_percentage: %6.2f"
" maximum_used_percentage: %6.2f",
minimum_free_percentage,
maximum_used_percentage);
gclog_or_tty->print_cr(" "
" free_after_gc : %6.1fK"
" used_after_gc : %6.1fK"
" capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
gclog_or_tty->print_cr(" "
" free_percentage: %6.2f",
free_percentage);
}
if (capacity_after_gc < minimum_desired_capacity) {
// If we have less free space than we want then expand
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
expand(expand_bytes, 0); // safe if expansion fails
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" expanding:"
" minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK"
" _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
}
return;
}
// No expansion, now see if we want to shrink
size_t shrink_bytes = 0;
// We would never want to shrink more than this
size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
if (MaxHeapFreeRatio < 100) {
const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const double max_tmp = used_after_gc / minimum_used_percentage;
size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
maximum_desired_capacity = MAX2(maximum_desired_capacity,
spec()->init_size());
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" maximum_free_percentage: %6.2f"
" minimum_used_percentage: %6.2f",
maximum_free_percentage,
minimum_used_percentage);
gclog_or_tty->print_cr(" "
" _capacity_at_prologue: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
}
assert(minimum_desired_capacity <= maximum_desired_capacity,
"sanity check");
if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
shrink_bytes = capacity_after_gc - maximum_desired_capacity;
// We don't want shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (current_shrink_factor == 0) {
_shrink_factor = 10;
} else {
_shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" shrinking:"
" initSize: %.1fK"
" maximum_desired_capacity: %.1fK",
spec()->init_size() / (double) K,
maximum_desired_capacity / (double) K);
gclog_or_tty->print_cr(" "
" shrink_bytes: %.1fK"
" current_shrink_factor: %d"
" new shrink factor: %d"
" _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
current_shrink_factor,
_shrink_factor,
_min_heap_delta_bytes / (double) K);
}
}
}
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
// We have two shrinking computations, take the largest
shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" aggressive shrinking:"
" _capacity_at_prologue: %.1fK"
" capacity_after_gc: %.1fK"
" expansion_for_promotion: %.1fK"
" shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
shrink(shrink_bytes);
}
assert(used() == used_after_gc && used_after_gc <= capacity(),
"sanity check");
}
void TenuredGeneration::gc_prologue(bool full) {
_capacity_at_prologue = capacity();
_used_at_prologue = used();
@@ -312,6 +156,19 @@ void TenuredGeneration::collect(bool full,
size, is_tlab);
}
void TenuredGeneration::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
CardGeneration::compute_new_size();
assert(used() == used_after_gc && used_after_gc <= capacity(),
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}
void TenuredGeneration::update_gc_stats(int current_level,
bool full) {
// If the next lower level(s) has been collected, gather any statistics
......
@@ -38,13 +38,6 @@ class ParGCAllocBufferWithBOT;
class TenuredGeneration: public OneContigSpaceCardGeneration {
friend class VMStructs;
protected:
// current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
// Some statistics from before gc started.
// These are gathered in the gc_prologue (and should_collect)
// to control growing/shrinking policy in spite of promotions.
size_t _capacity_at_prologue;
size_t _used_at_prologue;
#if INCLUDE_ALL_GCS
// To support parallel promotion: an array of parallel allocation
@@ -80,9 +73,6 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
return !CollectGen0First;
}
// Mark sweep support
void compute_new_size();
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
bool should_collect(bool full,
@@ -93,6 +83,7 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
bool clear_all_soft_refs,
size_t size,
bool is_tlab);
virtual void compute_new_size();
#if INCLUDE_ALL_GCS
// Overrides.
......
@@ -478,6 +478,9 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
\
nonstatic_field(CardGeneration, _rs, GenRemSet*) \
nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*) \
nonstatic_field(CardGeneration, _shrink_factor, size_t) \
nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
\
nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
@@ -548,8 +551,6 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(Space, _bottom, HeapWord*) \
nonstatic_field(Space, _end, HeapWord*) \
\
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \
......