提交 33933575 编写于 作者: J jwilhelm

Merge

...@@ -193,7 +193,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration( ...@@ -193,7 +193,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) : FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct), CardGeneration(rs, initial_byte_size, level, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type) _debug_collection_type(Concurrent_collection_type),
_did_compact(false)
{ {
HeapWord* bottom = (HeapWord*) _virtual_space.low(); HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high(); HeapWord* end = (HeapWord*) _virtual_space.high();
...@@ -917,18 +918,15 @@ void ConcurrentMarkSweepGeneration::compute_new_size() { ...@@ -917,18 +918,15 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
return; return;
} }
// Compute some numbers about the state of the heap. // The heap has been compacted but not reset yet.
const size_t used_after_gc = used(); // Any metric such as free() or used() will be incorrect.
const size_t capacity_after_gc = capacity();
CardGeneration::compute_new_size(); CardGeneration::compute_new_size();
// Reset again after a possible resizing // Reset again after a possible resizing
cmsSpace()->reset_after_compaction(); if (did_compact()) {
cmsSpace()->reset_after_compaction();
assert(used() == used_after_gc && used_after_gc <= capacity(), }
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
} }
void ConcurrentMarkSweepGeneration::compute_new_size_free_list() { void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
...@@ -1578,6 +1576,8 @@ bool CMSCollector::shouldConcurrentCollect() { ...@@ -1578,6 +1576,8 @@ bool CMSCollector::shouldConcurrentCollect() {
return false; return false;
} }
void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
// Clear _expansion_cause fields of constituent generations // Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() { void CMSCollector::clear_expansion_cause() {
_cmsGen->clear_expansion_cause(); _cmsGen->clear_expansion_cause();
...@@ -1675,7 +1675,6 @@ void CMSCollector::collect(bool full, ...@@ -1675,7 +1675,6 @@ void CMSCollector::collect(bool full,
} }
acquire_control_and_collect(full, clear_all_soft_refs); acquire_control_and_collect(full, clear_all_soft_refs);
_full_gcs_since_conc_gc++; _full_gcs_since_conc_gc++;
} }
void CMSCollector::request_full_gc(unsigned int full_gc_count) { void CMSCollector::request_full_gc(unsigned int full_gc_count) {
...@@ -1857,6 +1856,7 @@ NOT_PRODUCT( ...@@ -1857,6 +1856,7 @@ NOT_PRODUCT(
} }
} }
set_did_compact(should_compact);
if (should_compact) { if (should_compact) {
// If the collection is being acquired from the background // If the collection is being acquired from the background
// collector, there may be references on the discovered // collector, there may be references on the discovered
...@@ -2718,6 +2718,7 @@ void CMSCollector::gc_epilogue(bool full) { ...@@ -2718,6 +2718,7 @@ void CMSCollector::gc_epilogue(bool full) {
Chunk::clean_chunk_pool(); Chunk::clean_chunk_pool();
} }
set_did_compact(false);
_between_prologue_and_epilogue = false; // ready for next cycle _between_prologue_and_epilogue = false; // ready for next cycle
} }
......
...@@ -604,6 +604,8 @@ class CMSCollector: public CHeapObj<mtGC> { ...@@ -604,6 +604,8 @@ class CMSCollector: public CHeapObj<mtGC> {
ConcurrentMarkSweepPolicy* _collector_policy; ConcurrentMarkSweepPolicy* _collector_policy;
ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
void set_did_compact(bool v);
// XXX Move these to CMSStats ??? FIX ME !!! // XXX Move these to CMSStats ??? FIX ME !!!
elapsedTimer _inter_sweep_timer; // time between sweeps elapsedTimer _inter_sweep_timer; // time between sweeps
elapsedTimer _intra_sweep_timer; // time _in_ sweeps elapsedTimer _intra_sweep_timer; // time _in_ sweeps
...@@ -1081,6 +1083,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration { ...@@ -1081,6 +1083,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
CollectionTypes _debug_collection_type; CollectionTypes _debug_collection_type;
// True if a compactiing collection was done.
bool _did_compact;
bool did_compact() { return _did_compact; }
// Fraction of current occupancy at which to start a CMS collection which // Fraction of current occupancy at which to start a CMS collection which
// will collect this generation (at least). // will collect this generation (at least).
double _initiating_occupancy; double _initiating_occupancy;
...@@ -1121,6 +1127,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration { ...@@ -1121,6 +1127,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Adaptive size policy // Adaptive size policy
CMSAdaptiveSizePolicy* size_policy(); CMSAdaptiveSizePolicy* size_policy();
void set_did_compact(bool v) { _did_compact = v; }
bool refs_discovery_is_atomic() const { return false; } bool refs_discovery_is_atomic() const { return false; }
bool refs_discovery_is_mt() const { bool refs_discovery_is_mt() const {
// Note: CMS does MT-discovery during the parallel-remark // Note: CMS does MT-discovery during the parallel-remark
......
...@@ -1843,33 +1843,32 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { ...@@ -1843,33 +1843,32 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ReservedSpace::page_align_size_down(shrink_bytes); ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
uint num_regions_deleted = 0; uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
HeapWord* old_end = (HeapWord*) _g1_storage.high(); HeapWord* old_end = (HeapWord*) _g1_storage.high();
assert(mr.end() == old_end, "post-condition"); size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
ergo_verbose3(ErgoHeapSizing, ergo_verbose3(ErgoHeapSizing,
"shrink the heap", "shrink the heap",
ergo_format_byte("requested shrinking amount") ergo_format_byte("requested shrinking amount")
ergo_format_byte("aligned shrinking amount") ergo_format_byte("aligned shrinking amount")
ergo_format_byte("attempted shrinking amount"), ergo_format_byte("attempted shrinking amount"),
shrink_bytes, aligned_shrink_bytes, mr.byte_size()); shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
if (mr.byte_size() > 0) { if (num_regions_removed > 0) {
_g1_storage.shrink_by(shrunk_bytes);
HeapWord* new_end = (HeapWord*) _g1_storage.high();
if (_hr_printer.is_active()) { if (_hr_printer.is_active()) {
HeapWord* curr = mr.end(); HeapWord* curr = old_end;
while (curr > mr.start()) { while (curr > new_end) {
HeapWord* curr_end = curr; HeapWord* curr_end = curr;
curr -= HeapRegion::GrainWords; curr -= HeapRegion::GrainWords;
_hr_printer.uncommit(curr, curr_end); _hr_printer.uncommit(curr, curr_end);
} }
assert(curr == mr.start(), "post-condition");
} }
_g1_storage.shrink_by(mr.byte_size()); _expansion_regions += num_regions_removed;
HeapWord* new_end = (HeapWord*) _g1_storage.high();
assert(mr.start() == new_end, "post-condition");
_expansion_regions += num_regions_deleted;
update_committed_space(old_end, new_end); update_committed_space(old_end, new_end);
HeapRegionRemSet::shrink_heap(n_regions()); HeapRegionRemSet::shrink_heap(n_regions());
g1_policy()->record_new_heap_size(n_regions()); g1_policy()->record_new_heap_size(n_regions());
......
...@@ -309,7 +309,8 @@ G1CollectorPolicy::G1CollectorPolicy() : ...@@ -309,7 +309,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
void G1CollectorPolicy::initialize_flags() { void G1CollectorPolicy::initialize_flags() {
set_min_alignment(HeapRegion::GrainBytes); set_min_alignment(HeapRegion::GrainBytes);
set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name())); size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
set_max_alignment(MAX2(card_table_alignment, min_alignment()));
if (SurvivorRatio < 1) { if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified"); vm_exit_during_initialization("Invalid survivor ratio specified");
} }
......
...@@ -124,11 +124,11 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, ...@@ -124,11 +124,11 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
} }
assert(_regions[index] == NULL, "invariant"); assert(_regions[index] == NULL, "invariant");
_regions[index] = new_hr; _regions[index] = new_hr;
increment_length(&_allocated_length); increment_allocated_length();
} }
// Have to increment the length first, otherwise we will get an // Have to increment the length first, otherwise we will get an
// assert failure at(index) below. // assert failure at(index) below.
increment_length(&_length); increment_length();
HeapRegion* hr = at(index); HeapRegion* hr = at(index);
list->add_as_tail(hr); list->add_as_tail(hr);
...@@ -201,45 +201,29 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { ...@@ -201,45 +201,29 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
} }
} }
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
uint* num_regions_deleted) {
// Reset this in case it's currently pointing into the regions that // Reset this in case it's currently pointing into the regions that
// we just removed. // we just removed.
_next_search_index = 0; _next_search_index = 0;
assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
assert(length() > 0, "the region sequence should not be empty"); assert(length() > 0, "the region sequence should not be empty");
assert(length() <= _allocated_length, "invariant"); assert(length() <= _allocated_length, "invariant");
assert(_allocated_length > 0, "we should have at least one region committed"); assert(_allocated_length > 0, "we should have at least one region committed");
assert(num_regions_to_remove < length(), "We should never remove all regions");
// around the loop, i will be the next region to be removed uint i = 0;
uint i = length() - 1; for (; i < num_regions_to_remove; i++) {
assert(i > 0, "we should never remove all regions"); HeapRegion* cur = at(length() - 1);
// [last_start, end) is the MemRegion that covers the regions we will remove.
HeapWord* end = at(i)->end();
HeapWord* last_start = end;
*num_regions_deleted = 0;
while (shrink_bytes > 0) {
HeapRegion* cur = at(i);
// We should leave the humongous regions where they are.
if (cur->isHumongous()) break;
// We should stop shrinking if we come across a non-empty region.
if (!cur->is_empty()) break;
i -= 1; if (!cur->is_empty()) {
*num_regions_deleted += 1; // We have to give up if the region can not be moved
shrink_bytes -= cur->capacity(); break;
last_start = cur->bottom(); }
decrement_length(&_length); assert(!cur->isHumongous(), "Humongous regions should not be empty");
// We will reclaim the HeapRegion. _allocated_length should be
// covering this index. So, even though we removed the region from decrement_length();
// the active set by decreasing _length, we still have it
// available in the future if we need to re-use it.
assert(i > 0, "we should never remove all regions");
assert(length() > 0, "we should never remove all regions");
} }
return MemRegion(last_start, end); return i;
} }
#ifndef PRODUCT #ifndef PRODUCT
......
...@@ -92,14 +92,19 @@ class HeapRegionSeq: public CHeapObj<mtGC> { ...@@ -92,14 +92,19 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// address is valid. // address is valid.
inline uintx addr_to_index_biased(HeapWord* addr) const; inline uintx addr_to_index_biased(HeapWord* addr) const;
void increment_length(uint* length) { void increment_allocated_length() {
assert(*length < _max_length, "pre-condition"); assert(_allocated_length < _max_length, "pre-condition");
*length += 1; _allocated_length++;
} }
void decrement_length(uint* length) { void increment_length() {
assert(*length > 0, "pre-condition"); assert(_length < _max_length, "pre-condition");
*length -= 1; _length++;
}
void decrement_length() {
assert(_length > 0, "pre-condition");
_length--;
} }
public: public:
...@@ -153,11 +158,9 @@ class HeapRegionSeq: public CHeapObj<mtGC> { ...@@ -153,11 +158,9 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const; void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
// Tag as uncommitted as many regions that are completely free as // Tag as uncommitted as many regions that are completely free as
// possible, up to shrink_bytes, from the suffix of the committed // possible, up to num_regions_to_remove, from the suffix of the committed
// sequence. Return a MemRegion that corresponds to the address // sequence. Return the actual number of removed regions.
// range of the uncommitted regions. Assume shrink_bytes is page and uint shrink_by(uint num_regions_to_remove);
// heap region aligned.
MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
// Do some sanity checking. // Do some sanity checking.
void verify_optional() PRODUCT_RETURN; void verify_optional() PRODUCT_RETURN;
......
...@@ -48,6 +48,17 @@ ...@@ -48,6 +48,17 @@
// CollectorPolicy methods. // CollectorPolicy methods.
void CollectorPolicy::initialize_flags() { void CollectorPolicy::initialize_flags() {
assert(max_alignment() >= min_alignment(),
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
max_alignment(), min_alignment()));
assert(max_alignment() % min_alignment() == 0,
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
max_alignment(), min_alignment()));
if (MaxHeapSize < InitialHeapSize) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
if (MetaspaceSize > MaxMetaspaceSize) { if (MetaspaceSize > MaxMetaspaceSize) {
MaxMetaspaceSize = MetaspaceSize; MaxMetaspaceSize = MetaspaceSize;
} }
...@@ -71,21 +82,9 @@ void CollectorPolicy::initialize_flags() { ...@@ -71,21 +82,9 @@ void CollectorPolicy::initialize_flags() {
} }
void CollectorPolicy::initialize_size_info() { void CollectorPolicy::initialize_size_info() {
// User inputs from -mx and ms are aligned // User inputs from -mx and ms must be aligned
set_initial_heap_byte_size(InitialHeapSize); set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
if (initial_heap_byte_size() == 0) { set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
set_initial_heap_byte_size(NewSize + OldSize);
}
set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
min_alignment()));
set_min_heap_byte_size(Arguments::min_heap_size());
if (min_heap_byte_size() == 0) {
set_min_heap_byte_size(NewSize + OldSize);
}
set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
min_alignment()));
set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
// Check heap parameter properties // Check heap parameter properties
...@@ -201,9 +200,6 @@ void GenCollectorPolicy::initialize_flags() { ...@@ -201,9 +200,6 @@ void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity. // All sizes must be multiples of the generation granularity.
set_min_alignment((uintx) Generation::GenGrain); set_min_alignment((uintx) Generation::GenGrain);
set_max_alignment(compute_max_alignment()); set_max_alignment(compute_max_alignment());
assert(max_alignment() >= min_alignment() &&
max_alignment() % min_alignment() == 0,
"invalid alignment constraints");
CollectorPolicy::initialize_flags(); CollectorPolicy::initialize_flags();
...@@ -233,9 +229,6 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -233,9 +229,6 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
GenCollectorPolicy::initialize_flags(); GenCollectorPolicy::initialize_flags();
OldSize = align_size_down(OldSize, min_alignment()); OldSize = align_size_down(OldSize, min_alignment());
if (NewSize + OldSize > MaxHeapSize) {
MaxHeapSize = NewSize + OldSize;
}
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
// NewRatio will be used later to set the young generation size so we use // NewRatio will be used later to set the young generation size so we use
...@@ -250,6 +243,27 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -250,6 +243,27 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
} }
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
// adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) {
if (FLAG_IS_CMDLINE(MaxHeapSize)) {
// somebody set a maximum heap size with the intention that we should not
// exceed it. Adjust New/OldSize as necessary.
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
// align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
// OldSize is already aligned because above we aligned MaxHeapSize to
// max_alignment(), and we just made sure that NewSize is aligned to
// min_alignment(). In initialize_flags() we verified that max_alignment()
// is a multiple of min_alignment().
OldSize = MaxHeapSize - NewSize;
} else {
MaxHeapSize = NewSize + OldSize;
}
}
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
always_do_update_barrier = UseConcMarkSweepGC; always_do_update_barrier = UseConcMarkSweepGC;
// Check validity of heap flags // Check validity of heap flags
......
...@@ -93,6 +93,15 @@ WB_ENTRY(jboolean, WB_IsClassAlive(JNIEnv* env, jobject target, jstring name)) ...@@ -93,6 +93,15 @@ WB_ENTRY(jboolean, WB_IsClassAlive(JNIEnv* env, jobject target, jstring name))
return closure.found(); return closure.found();
WB_END WB_END
WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
CollectorPolicy * p = Universe::heap()->collector_policy();
gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
p->min_alignment(), p->max_alignment());
}
WB_END
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj)) WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
G1CollectedHeap* g1 = G1CollectedHeap::heap(); G1CollectedHeap* g1 = G1CollectedHeap::heap();
...@@ -386,6 +395,7 @@ static JNINativeMethod methods[] = { ...@@ -386,6 +395,7 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;", CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
(void*) &WB_ParseCommandLine (void*) &WB_ParseCommandLine
}, },
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark}, {CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous }, {CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
......
...@@ -747,16 +747,16 @@ void Arguments::add_string(char*** bldarray, int* count, const char* arg) { ...@@ -747,16 +747,16 @@ void Arguments::add_string(char*** bldarray, int* count, const char* arg) {
return; return;
} }
int index = *count; int new_count = *count + 1;
// expand the array and add arg to the last element // expand the array and add arg to the last element
(*count)++;
if (*bldarray == NULL) { if (*bldarray == NULL) {
*bldarray = NEW_C_HEAP_ARRAY(char*, *count, mtInternal); *bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtInternal);
} else { } else {
*bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count, mtInternal); *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
} }
(*bldarray)[index] = strdup(arg); (*bldarray)[*count] = strdup(arg);
*count = new_count;
} }
void Arguments::build_jvm_args(const char* arg) { void Arguments::build_jvm_args(const char* arg) {
...@@ -1617,30 +1617,38 @@ void Arguments::set_heap_size() { ...@@ -1617,30 +1617,38 @@ void Arguments::set_heap_size() {
FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max); FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
} }
// If the initial_heap_size has not been set with InitialHeapSize // If the minimum or initial heap_size have not been set or requested to be set
// or -Xms, then set it as fraction of the size of physical memory, // ergonomically, set them accordingly.
// respecting the maximum and minimum sizes of the heap. if (InitialHeapSize == 0 || min_heap_size() == 0) {
if (FLAG_IS_DEFAULT(InitialHeapSize)) {
julong reasonable_minimum = (julong)(OldSize + NewSize); julong reasonable_minimum = (julong)(OldSize + NewSize);
reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize); reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize);
reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum); reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum);
julong reasonable_initial = phys_mem / InitialRAMFraction; if (InitialHeapSize == 0) {
julong reasonable_initial = phys_mem / InitialRAMFraction;
reasonable_initial = MAX2(reasonable_initial, reasonable_minimum); reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)min_heap_size());
reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize); reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
reasonable_initial = limit_by_allocatable_memory(reasonable_initial); reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet. // Cannot use gclog_or_tty yet.
tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial); tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
tty->print_cr(" Minimum heap size " SIZE_FORMAT, (uintx)reasonable_minimum); }
FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
}
// If the minimum heap size has not been set (via -Xms),
// synchronize with InitialHeapSize to avoid errors with the default value.
if (min_heap_size() == 0) {
set_min_heap_size(MIN2((uintx)reasonable_minimum, InitialHeapSize));
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
tty->print_cr(" Minimum heap size " SIZE_FORMAT, min_heap_size());
}
} }
FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
set_min_heap_size((uintx)reasonable_minimum);
} }
} }
...@@ -2426,7 +2434,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, ...@@ -2426,7 +2434,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// -Xms // -Xms
} else if (match_option(option, "-Xms", &tail)) { } else if (match_option(option, "-Xms", &tail)) {
julong long_initial_heap_size = 0; julong long_initial_heap_size = 0;
ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1); // an initial heap size of 0 means automatically determine
ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 0);
if (errcode != arg_in_range) { if (errcode != arg_in_range) {
jio_fprintf(defaultStream::error_stream(), jio_fprintf(defaultStream::error_stream(),
"Invalid initial heap size: %s\n", option->optionString); "Invalid initial heap size: %s\n", option->optionString);
...@@ -2437,7 +2446,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, ...@@ -2437,7 +2446,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// Currently the minimum size and the initial heap sizes are the same. // Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(InitialHeapSize); set_min_heap_size(InitialHeapSize);
// -Xmx // -Xmx
} else if (match_option(option, "-Xmx", &tail)) { } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
julong long_max_heap_size = 0; julong long_max_heap_size = 0;
ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1); ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
if (errcode != arg_in_range) { if (errcode != arg_in_range) {
......
...@@ -2968,7 +2968,7 @@ class CommandLineFlags { ...@@ -2968,7 +2968,7 @@ class CommandLineFlags {
\ \
/* gc parameters */ \ /* gc parameters */ \
product(uintx, InitialHeapSize, 0, \ product(uintx, InitialHeapSize, 0, \
"Initial heap size (in bytes); zero means OldSize + NewSize") \ "Initial heap size (in bytes); zero means use ergonomics") \
\ \
product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \ product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \
"Maximum heap size (in bytes)") \ "Maximum heap size (in bytes)") \
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test CheckAllocateAndSystemGC
* @summary CMS: assert(used() == used_after_gc && used_after_gc <= capacity()) failed: used: 0 used_after_gc: 292080 capacity: 1431699456
* @bug 8013032
* @key gc
* @key regression
* @library /testlibrary
* @run main/othervm CheckAllocateAndSystemGC
* @author jon.masamitsu@oracle.com
*/
import com.oracle.java.testlibrary.*;
/**
 * Regression test for JDK-8013032: with -XX:-UseCMSCompactAtFullCollection,
 * a foreground System.gc() must not trip the
 * "used() == used_after_gc && used_after_gc <= capacity()" assert in
 * ConcurrentMarkSweepGeneration::compute_new_size().
 */
public class CheckAllocateAndSystemGC {
  public static void main(String args[]) throws Exception {
    // Launch a child CMS VM with a tiny young gen and compaction disabled
    // on full collections, so compute_new_size() runs after a
    // non-compacting foreground collection.
    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
      "-showversion",
      "-XX:+UseConcMarkSweepGC",
      "-Xmn4m",
      "-XX:MaxTenuringThreshold=1",
      "-XX:-UseCMSCompactAtFullCollection",
      // Use Class.getName() (yields "CheckAllocateAndSystemGC$AllocateAndSystemGC")
      // instead of a hard-coded literal, matching the sibling test
      // SystemGCOnForegroundCollector and surviving class renames.
      AllocateAndSystemGC.class.getName()
      );

    OutputAnalyzer output = new OutputAnalyzer(pb.start());

    // The child VM must terminate cleanly with no assertion/error output.
    output.shouldNotContain("error");

    output.shouldHaveExitValue(0);
  }

  // Child program: promote some garbage into the CMS generation, then
  // request a full (foreground) collection.
  static class AllocateAndSystemGC {
    public static void main(String [] args) {
      Integer x[] = new Integer [1000];
      // Allocate enough objects to cause a minor collection.
      // These allocations suffice for a 4m young geneneration.
      for (int i = 0; i < 100; i++) {
        Integer y[] = new Integer[10000];
      }
      System.gc();
    }
  }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test SystemGCOnForegroundCollector
* @summary CMS: Call reset_after_compaction() only if a compaction has been done
* @bug 8013184
* @key gc
* @key regression
* @library /testlibrary
* @run main/othervm SystemGCOnForegroundCollector
* @author jon.masamitsu@oracle.com
*/
import com.oracle.java.testlibrary.*;
/**
 * Regression test for JDK-8013184: the CMS foreground collector must call
 * reset_after_compaction() only when a compaction actually happened.
 */
public class SystemGCOnForegroundCollector {
  public static void main(String args[]) throws Exception {
    // Child VM options: CMS with full-collection compaction turned off,
    // which is the configuration that exposed the original failure.
    String[] vmOpts = {
      "-showversion",
      "-XX:+UseConcMarkSweepGC",
      "-XX:MaxTenuringThreshold=1",
      "-XX:-UseCMSCompactAtFullCollection",
      ThreePlusMSSystemGC.class.getName()
    };

    ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(vmOpts);
    OutputAnalyzer analyzer = new OutputAnalyzer(processBuilder.start());

    // A clean run prints no error text and exits with status 0.
    analyzer.shouldNotContain("error");
    analyzer.shouldHaveExitValue(0);
  }

  // Child program: trigger repeated foreground collections.
  static class ThreePlusMSSystemGC {
    public static void main(String [] args) {
      // From running this test 3 System.gc() were always
      // enough to see the failure but the cause of the failure
      // depends on how objects are allocated in the CMS generation
      // which is non-deterministic. Use 30 iterations for a more
      // reliable test.
      for (int i = 0; i < 30; i++) {
        System.gc();
      }
    }
  }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestRegionAlignment.java
* @bug 8013791
* @summary Make sure that G1 ergonomics pick a heap size that is aligned with the region size
* @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -XX:MaxRAM=555m TestRegionAlignment
*
* When G1 ergonomically picks a maximum heap size it must be aligned to the region size.
* This test tries to get the VM to pick a small and unaligned heap size (by using MaxRAM=555) and a
* large region size (by using -XX:G1HeapRegionSize=32m). This will fail without the fix for 8013791.
*/
// Regression test shell for JDK-8013791: the interesting work happens at VM
// startup (G1 ergonomics must align the chosen heap size to the 32m region
// size forced on the @run line); an empty main() suffices — the test fails
// only if the VM refuses to start.
public class TestRegionAlignment {
    public static void main(String[] args) { }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestShrinkToOneRegion.java
* @bug 8013872
* @summary Shrinking the heap down to one region used to hit an assert
* @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -Xmx256m TestShrinkToOneRegion
*
* Doing a System.gc() without having allocated many objects will shrink the heap.
* With a large region size we will shrink the heap to one region.
*/
// Regression test for JDK-8013872: shrinking the G1 heap down to a single
// region used to hit an assert in HeapRegionSeq::shrink_by().
public class TestShrinkToOneRegion {
    public static void main(String[] args) {
        // With almost nothing allocated and 32m regions (see @run line),
        // a full GC shrinks the committed heap to one region.
        System.gc();
    }
}
...@@ -61,6 +61,9 @@ public class WhiteBox { ...@@ -61,6 +61,9 @@ public class WhiteBox {
registerNatives(); registerNatives();
} }
// Arguments
public native void printHeapSizes();
// Memory // Memory
public native long getObjectAddress(Object o); public native long getObjectAddress(Object o);
public native int getHeapOopSize(); public native int getHeapOopSize();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册