Commit 045c76e2 authored by coleenp

Merge

@@ -131,3 +131,7 @@ bdbc48857210a509b3c50a3291ecb9dd6a72e016 jdk7-b115
 806d0c037e6bbb88dac0699673f4ba55ee8c02da jdk7-b117
 698b7b727e12de44139d8cca6ab9a494ead13253 jdk7-b118
 3ef7426b4deac5dcfd4afb35cabe9ab3d666df91 hs20-b02
+5484e7c53fa7da5e869902437ee08a9ae10c1c69 jdk7-b119
+f5603a6e50422046ebc0d2f1671d55cb8f1bf1e9 jdk7-b120
+3f3653ab7af8dc1ddb9fa75dad56bf94f89e81a8 jdk7-b121
+5484e7c53fa7da5e869902437ee08a9ae10c1c69 hs20-b03
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
 HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
......
@@ -896,7 +896,7 @@ bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promoti
   size_t available = max_available();
   size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
   bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  if (PrintGC && Verbose) {
+  if (Verbose && PrintGCDetails) {
     gclog_or_tty->print_cr(
       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
       "max_promo("SIZE_FORMAT")",
@@ -1562,8 +1562,8 @@ bool CMSCollector::shouldConcurrentCollect() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->collector_policy()->is_two_generation_policy(),
          "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail()) {
-    if (PrintGCDetails && Verbose) {
+  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
+    if (Verbose && PrintGCDetails) {
       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
     }
     return true;
@@ -1927,7 +1927,7 @@ void CMSCollector::decide_foreground_collection_type(
          "You may want to check the correctness of the following");
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail()) {
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     assert(!_cmsGen->incremental_collection_failed(),
            "Should have been noticed, reacted to and cleared");
     _cmsGen->set_incremental_collection_failed();
@@ -1936,7 +1936,7 @@ void CMSCollector::decide_foreground_collection_type(
     UseCMSCompactAtFullCollection &&
     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
      GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail());
+     gch->incremental_collection_will_fail(true /* consult_young */));
   *should_start_over = false;
   if (clear_all_soft_refs && !*should_compact) {
     // We are about to do a last ditch collection attempt
......
@@ -287,7 +287,7 @@ inline bool CMSCollector::should_abort_preclean() const {
   // scavenge is done or foreground GC wants to take over collection
   return _collectorState == AbortablePreclean &&
          (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail());
+          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
 }
 inline size_t CMSCollector::get_eden_used() const {
......
@@ -619,15 +619,19 @@ G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
 HeapWord*
 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
                                                        bool at_safepoint,
-                                                       bool do_dirtying) {
+                                                       bool do_dirtying,
+                                                       bool can_expand) {
   assert_heap_locked_or_at_safepoint();
   assert(_cur_alloc_region == NULL,
          "replace_cur_alloc_region_and_allocate() should only be called "
          "after retiring the previous current alloc region");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
          "at_safepoint and is_at_safepoint() should be a tautology");
+  assert(!can_expand || g1_policy()->can_expand_young_list(),
+         "we should not call this method with can_expand == true if "
+         "we are not allowed to expand the young gen");
-  if (!g1_policy()->is_young_list_full()) {
+  if (can_expand || !g1_policy()->is_young_list_full()) {
     if (!at_safepoint) {
       // The cleanup operation might update _summary_bytes_used
       // concurrently with this method. So, right now, if we don't
@@ -738,11 +742,26 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   }
   if (GC_locker::is_active_and_needs_gc()) {
-    // We are locked out of GC because of the GC locker. Right now,
-    // we'll just stall until the GC locker-induced GC
-    // completes. This will be fixed in the near future by extending
-    // the eden while waiting for the GC locker to schedule the GC
-    // (see CR 6994056).
+    // We are locked out of GC because of the GC locker. We can
+    // allocate a new region only if we can expand the young gen.
+    if (g1_policy()->can_expand_young_list()) {
+      // Yes, we are allowed to expand the young gen. Let's try to
+      // allocate a new current alloc region.
+      HeapWord* result =
+        replace_cur_alloc_region_and_allocate(word_size,
+                                              false, /* at_safepoint */
+                                              true,  /* do_dirtying */
+                                              true   /* can_expand */);
+      if (result != NULL) {
+        assert_heap_not_locked();
+        return result;
+      }
+    }
+    // We could not expand the young gen further (or we could but we
+    // failed to allocate a new region). We'll stall until the GC
+    // locker forces a GC.
     // If this thread is not in a jni critical section, we stall
     // the requestor until the critical section has cleared and
@@ -950,7 +969,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
            "at this point we should have no cur alloc region");
     return replace_cur_alloc_region_and_allocate(word_size,
                                                  true, /* at_safepoint */
-                                                 false /* do_dirtying */);
+                                                 false /* do_dirtying */,
+                                                 false /* can_expand */);
   } else {
     return attempt_allocation_humongous(word_size,
                                         true /* at_safepoint */);
@@ -2040,7 +2060,6 @@ void G1CollectedHeap::ref_processing_init() {
   _ref_processor = ReferenceProcessor::create_ref_processor(
                                          mr,    // span
                                          false, // Reference discovery is not atomic
-                                                // (though it shouldn't matter here.)
                                          true,  // mt_discovery
                                          NULL,  // is alive closure: need to fill this in for efficiency
                                          ParallelGCThreads,
......
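The new GC-locker path above changes G1's slow allocation path: instead of always stalling until the GC-locker-induced collection runs, the allocator first checks whether the young generation may still be expanded and, if so, takes a fresh current alloc region. A rough standalone model of that decision (names and types are simplified assumptions, not the HotSpot API):

    #include <cstddef>

    // Simplified model of the allocation decision while the GC locker is active.
    enum class SlowPathAction { AllocatedNewRegion, StallForGC };

    struct YoungPolicy {
      size_t length;   // current number of young regions
      size_t target;   // normal target length
      size_t max;      // target + GC-locker expansion headroom
      bool can_expand() const { return length < max; }
    };

    inline SlowPathAction gc_locker_slow_path(YoungPolicy& policy, bool region_available) {
      if (policy.can_expand() && region_available) {
        policy.length++;                  // take one more eden region
        return SlowPathAction::AllocatedNewRegion;
      }
      return SlowPathAction::StallForGC;  // out of headroom: wait for the GC locker GC
    }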
@@ -496,12 +496,15 @@ protected:
   inline HeapWord* attempt_allocation(size_t word_size);
   // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs
-  // the allocation out of the new current alloc region and updates
-  // _cur_alloc_region.
+  // tries to allocate a new one. If it's successful, it performs the
+  // allocation out of the new current alloc region and updates
+  // _cur_alloc_region. Normally, it would try to allocate a new
+  // region if the young gen is not full, unless can_expand is true in
+  // which case it would always try to allocate a new region.
   HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                   bool at_safepoint,
-                                                  bool do_dirtying);
+                                                  bool do_dirtying,
+                                                  bool can_expand);
   // The slow path when we are unable to allocate a new current alloc
   // region to satisfy an allocation request (i.e., when
......
@@ -119,8 +119,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size) {
     // Try to get a new region and allocate out of it
     HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                             false, /* at safepoint */
-                                                             true /* do_dirtying */);
+                                                             false, /* at_safepoint */
+                                                             true,  /* do_dirtying */
+                                                             false  /* can_expand */);
     if (result != NULL) {
       assert_heap_not_locked();
       return result;
......
@@ -479,6 +479,7 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
   // region before we need to do a collection again.
   size_t min_length = _g1->young_list()->length() + 1;
   _young_list_target_length = MAX2(_young_list_target_length, min_length);
+  calculate_max_gc_locker_expansion();
   calculate_survivors_policy();
 }
@@ -2301,6 +2302,21 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
   };
 }
+void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
+  size_t expansion_region_num = 0;
+  if (GCLockerEdenExpansionPercent > 0) {
+    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+    double expansion_region_num_d = perc * (double) _young_list_target_length;
+    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+    // less than 1.0) we'll get 1.
+    expansion_region_num = (size_t) ceil(expansion_region_num_d);
+  } else {
+    assert(expansion_region_num == 0, "sanity");
+  }
+  _young_list_max_length = _young_list_target_length + expansion_region_num;
+  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
......
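To make the ceiling arithmetic in calculate_max_gc_locker_expansion() concrete: with the default GCLockerEdenExpansionPercent of 5 and a target young list of 10 regions, 5% of 10 is 0.5, ceil(0.5) is 1, so the young list may grow to at most 11 regions while the GC locker is active. A self-contained sketch of the same computation (illustrative, not part of the changeset):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // Standalone model of the max-length computation above.
    static size_t young_list_max_length(size_t target_length, unsigned expansion_percent) {
      size_t expansion_region_num = 0;
      if (expansion_percent > 0) {
        double perc = (double) expansion_percent / 100.0;
        // ceil() guarantees at least one extra region whenever any expansion is allowed
        expansion_region_num = (size_t) ceil(perc * (double) target_length);
      }
      return target_length + expansion_region_num;
    }

    int main() {
      printf("%zu\n", young_list_max_length(10, 5));   // prints 11
      printf("%zu\n", young_list_max_length(100, 5));  // prints 105
      return 0;
    }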
@@ -196,6 +196,10 @@ protected:
   size_t _young_list_target_length;
   size_t _young_list_fixed_length;
+  // The max number of regions we can extend the eden by while the GC
+  // locker is active. This should be >= _young_list_target_length;
+  size_t _young_list_max_length;
   size_t _young_cset_length;
   bool   _last_young_gc_full;
@@ -1113,13 +1117,22 @@ public:
   bool is_young_list_full() {
     size_t young_list_length = _g1->young_list()->length();
-    size_t young_list_max_length = _young_list_target_length;
+    size_t young_list_target_length = _young_list_target_length;
     if (G1FixedEdenSize) {
-      young_list_max_length -= _max_survivor_regions;
+      young_list_target_length -= _max_survivor_regions;
     }
-    return young_list_length >= young_list_max_length;
+    return young_list_length >= young_list_target_length;
+  }
+  bool can_expand_young_list() {
+    size_t young_list_length = _g1->young_list()->length();
+    size_t young_list_max_length = _young_list_max_length;
+    if (G1FixedEdenSize) {
+      young_list_max_length -= _max_survivor_regions;
+    }
+    return young_list_length < young_list_max_length;
   }
   void update_region_num(bool young);
   bool in_young_gc_mode() {
@@ -1231,6 +1244,8 @@ public:
     _survivors_age_table.merge_par(age_table);
   }
+  void calculate_max_gc_locker_expansion();
   // Calculates survivor space parameters.
   void calculate_survivors_policy();
......
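The pair of predicates above encodes two different limits: is_young_list_full() compares against the normal target length (reaching it triggers a collection), while can_expand_young_list() compares against the larger _young_list_max_length, which only matters while the GC locker is holding collections off. A compact illustration of the intended invariant target <= max (assumed, simplified types; not the G1CollectorPolicy class):

    #include <cassert>
    #include <cstddef>

    // Simplified mirror of the two young-list limits used above.
    struct YoungListLimits {
      size_t length;   // current young regions
      size_t target;   // normal collection trigger
      size_t max;      // GC-locker expansion ceiling, always >= target

      bool is_full() const    { return length >= target; }
      bool can_expand() const { return length < max; }
    };

    int main() {
      YoungListLimits y = { 10, 10, 11 };
      assert(y.target <= y.max);
      assert(y.is_full());     // normal allocations would now trigger a GC ...
      assert(y.can_expand());  // ... but the GC-locker path may still add one region
      return 0;
    }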
@@ -685,7 +685,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
       result = expand_heap_and_allocate(size, is_tlab);
     }
     return result;   // could be null if we are out of space
-  } else if (!gch->incremental_collection_will_fail()) {
+  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
     gch->do_collection(false /* full */,
                        false /* clear_all_soft_refs */,
@@ -693,6 +693,9 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                        is_tlab /* is_tlab */,
                        number_of_generations() - 1 /* max_level */);
   } else {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
+    }
     // Try a full collection; see delta for bug id 6266275
     // for the original code and why this has been simplified
     // with from-space allocation criteria modified and
......
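satisfy_failed_allocation() works through a fallback chain that this hunk makes more visible: expand the heap and retry, attempt an incremental (young) collection only if it is not predicted to fail, and otherwise go straight to a full collection. A schematic of that control flow (an illustrative simplification; the first condition is assumed, not shown in the hunk):

    // Schematic of the fallback order sketched by the hunk above.
    enum class GCChoice { ExpandAndRetry, IncrementalCollection, FullCollection };

    inline GCChoice choose_failed_allocation_path(bool gc_locked_out,
                                                  bool incremental_will_fail) {
      if (gc_locked_out) {
        // GC is locked out: expanding the heap is all we can do for now.
        return GCChoice::ExpandAndRetry;
      }
      if (!incremental_will_fail) {
        return GCChoice::IncrementalCollection;   // cheap young collection first
      }
      // "Trying full because partial may fail"
      return GCChoice::FullCollection;
    }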
@@ -483,16 +483,17 @@ void DefNewGeneration::space_iterate(SpaceClosure* blk,
 // so we try to allocate the from-space, too.
 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   HeapWord* result = NULL;
-  if (PrintGC && Verbose) {
+  if (Verbose && PrintGCDetails) {
     gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                         " will_fail: %s"
                         " heap_lock: %s"
                         " free: " SIZE_FORMAT,
                         size,
-                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
-                        Heap_lock->is_locked() ? "locked" : "unlocked",
-                        from()->free());
-  }
+                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
+                          "true" : "false",
+                        Heap_lock->is_locked() ? "locked" : "unlocked",
+                        from()->free());
+  }
   if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
     if (Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
@@ -534,6 +535,9 @@ void DefNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: Collection attempt not safe :: ");
+    }
     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
@@ -821,6 +825,9 @@ void DefNewGeneration::reset_scratch() {
 bool DefNewGeneration::collection_attempt_is_safe() {
   if (!to()->is_empty()) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: to is not empty :: ");
+    }
     return false;
   }
   if (_next_gen == NULL) {
@@ -843,10 +850,18 @@ void DefNewGeneration::gc_epilogue(bool full) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (full) {
     DEBUG_ONLY(seen_incremental_collection_failed = false;)
-    if (!collection_attempt_is_safe()) {
+    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
       set_should_allocate_from_space();         // we seem to be running out of space
     } else {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       gch->clear_incremental_collection_failed(); // We just did a full collection
       clear_should_allocate_from_space();         // if set
     }
@@ -860,11 +875,20 @@ void DefNewGeneration::gc_epilogue(bool full) {
     // a full collection in between.
     if (!seen_incremental_collection_failed &&
         gch->incremental_collection_failed()) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       seen_incremental_collection_failed = true;
     } else if (seen_incremental_collection_failed) {
-      assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed(),
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
+                            GCCause::to_string(gch->gc_cause()));
+      }
+      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
+             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
+             !gch->incremental_collection_failed(),
              "Twice in a row");
       seen_incremental_collection_failed = false;
     }
 #endif // ASSERT
......
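The DefNew hunks above are mostly diagnostics, but they also tighten the epilogue: after a full collection the generation only records an incremental-collection failure (and enables from-space allocation) when a future scavenge still looks unsafe and eden is not empty; otherwise both flags are cleared. A condensed model of that bookkeeping (assumed names, not the VM types):

    // Condensed model of the full-GC epilogue decision above.
    struct DefNewState {
      bool incremental_collection_failed;
      bool should_allocate_from_space;
    };

    inline void full_gc_epilogue(DefNewState& s,
                                 bool collection_attempt_is_safe,
                                 bool eden_is_empty) {
      if (!collection_attempt_is_safe && !eden_is_empty) {
        s.incremental_collection_failed = true;   // a future scavenge would likely fail
        s.should_allocate_from_space = true;      // squeeze allocations into from-space
      } else {
        s.incremental_collection_failed = false;  // the full collection gave us headroom
        s.should_allocate_from_space = false;
      }
    }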
@@ -935,7 +935,7 @@ void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                           int max_level) {
   int local_max_level;
-  if (!incremental_collection_will_fail() &&
+  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
       gc_cause() == GCCause::_gc_locker) {
     local_max_level = 0;
   } else {
@@ -951,7 +951,7 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
     // A scavenge may not have been attempted, or may have
     // been attempted and failed, because the old gen was too full
     if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
-        incremental_collection_will_fail()) {
+        incremental_collection_will_fail(false /* don't consult_young */)) {
       if (PrintGCDetails) {
         gclog_or_tty->print_cr("GC locker: Trying a full collection "
                                "because scavenge failed");
......
@@ -477,13 +477,17 @@ public:
   bool no_allocs_since_save_marks(int level);
   // Returns true if an incremental collection is likely to fail.
-  bool incremental_collection_will_fail() {
+  // We optionally consult the young gen, if asked to do so;
+  // otherwise we base our answer on whether the previous incremental
+  // collection attempt failed with no corrective action as of yet.
+  bool incremental_collection_will_fail(bool consult_young) {
     // Assumes a 2-generation system; the first disjunct remembers if an
     // incremental collection failed, even when we thought (second disjunct)
     // that it would not.
     assert(heap()->collector_policy()->is_two_generation_policy(),
            "the following definition may not be suitable for an n(>2)-generation system");
-    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
+    return incremental_collection_failed() ||
+           (consult_young && !get_gen(0)->collection_attempt_is_safe());
   }
   // If a generation bails out of an incremental collection,
......
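The consult_young flag threaded through the callers above distinguishes two questions: "has a scavenge already failed with no corrective action yet?" (cheap, used on paths that should not be pessimistic, such as the GC-locker full collection) versus "has it failed, or does the young gen itself predict that an attempt would be unsafe?" (used when deciding whether to start a concurrent or full collection). A minimal sketch of the predicate (written as an assumed free function, not the GenCollectedHeap member):

    // Sketch: the two-part failure prediction controlled by consult_young.
    inline bool incremental_collection_will_fail(bool previous_attempt_failed,
                                                 bool young_attempt_is_safe,
                                                 bool consult_young) {
      // First disjunct: remember an earlier failure that has not been corrected yet.
      // Second disjunct: only if asked, also trust the young gen's own prediction.
      return previous_attempt_failed || (consult_young && !young_attempt_is_safe);
    }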
@@ -1146,6 +1146,20 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
   }
 }
+#ifndef PRODUCT
+// Non-atomic (i.e. concurrent) discovery might allow us
+// to observe j.l.References with NULL referents, being those
+// cleared concurrently by mutators during (or after) discovery.
+void ReferenceProcessor::verify_referent(oop obj) {
+  bool da = discovery_is_atomic();
+  oop referent = java_lang_ref_Reference::referent(obj);
+  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
+         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
+                 INTPTR_FORMAT " during %satomic discovery ",
+                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
+}
+#endif
 // We mention two of several possible choices here:
 // #0: if the reference object is not in the "originating generation"
 //     (or part of the heap being collected, indicated by our "span"
@@ -1196,14 +1210,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   // We only enqueue references whose referents are not (yet) strongly
   // reachable.
   if (is_alive_non_header() != NULL) {
-    oop referent = java_lang_ref_Reference::referent(obj);
-    // In the case of non-concurrent discovery, the last
-    // disjunct below should hold. It may not hold in the
-    // case of concurrent discovery because mutators may
-    // concurrently clear() a Reference.
-    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
-           "Refs with null referents already filtered");
-    if (is_alive_non_header()->do_object_b(referent)) {
+    verify_referent(obj);
+    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
       return false;  // referent is reachable
     }
   }
@@ -1247,13 +1255,13 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   }
   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
-    oop referent = java_lang_ref_Reference::referent(obj);
-    assert(referent->is_oop(), "bad referent");
+    verify_referent(obj);
     // enqueue if and only if either:
     // reference is in our span or
     // we are an atomic collector and referent is in our span
     if (_span.contains(obj_addr) ||
-        (discovery_is_atomic() && _span.contains(referent))) {
+        (discovery_is_atomic() &&
+         _span.contains(java_lang_ref_Reference::referent(obj)))) {
       // should_enqueue = true;
     } else {
       return false;
@@ -1301,7 +1309,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
     }
   }
   assert(obj->is_oop(), "Enqueued a bad reference");
-  assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
+  verify_referent(obj);
   return true;
 }
......
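The verify_referent() helper centralizes an invariant that the old inline asserts stated inconsistently: under atomic discovery the referent must still be a valid object, while under concurrent (non-atomic) discovery a mutator may have cleared the Reference between discovery and the check, so NULL has to be tolerated. A hedged standalone restatement of that rule (plain C++, not the HotSpot oop machinery):

    #include <cassert>

    // Illustrative restatement of the discovery-time referent invariant.
    struct Ref { const void* referent; };   // stand-in for a discovered j.l.Reference

    inline void verify_referent(const Ref& r, bool discovery_is_atomic) {
      if (discovery_is_atomic) {
        // Atomic discovery happens with mutators stopped: a cleared referent is a bug.
        assert(r.referent != nullptr && "bad referent found during atomic discovery");
      }
      // Non-atomic (concurrent) discovery: nullptr is acceptable because mutators
      // may clear() the Reference while discovery is in progress.
    }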
@@ -345,6 +345,7 @@ class ReferenceProcessor : public CHeapObj {
   // debugging
   void verify_no_references_recorded() PRODUCT_RETURN;
+  void verify_referent(oop obj)        PRODUCT_RETURN;
   static void verify();
   // clear the discovered lists (unlinking each entry).
......
@@ -118,12 +118,15 @@ inline oop oopDesc::forward_to_atomic(oop p) {
   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
-  while (!is_forwarded()) {
+  while (!oldMark->is_marked()) {
     curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
-    assert(is_forwarded(), "object should have been forwarded");
     if (curMark == oldMark) {
+      assert(is_forwarded(), "the CAS should have succeeded.");
       return NULL;
     }
+    // If the CAS was unsuccessful then curMark->is_marked()
+    // should return true as another thread has CAS'd in another
+    // forwarding pointer.
     oldMark = curMark;
   }
   return forwardee();
......
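The loop fix above matters under contention: the old condition re-read the shared mark via is_forwarded(), which could already be true because a competing thread installed its own forwarding pointer, and the assert fired before checking whether this thread's CAS had actually won. The new loop iterates on the locally observed oldMark and only asserts after a successful CAS. A small standalone analogue using std::atomic (an assumed simplification; HotSpot uses its own Atomic::cmpxchg_ptr and mark-word encoding):

    #include <atomic>
    #include <cstdint>

    using mark_t = std::intptr_t;

    // Assumed encoding: a "marked" value means a forwarding pointer is installed.
    inline bool is_marked(mark_t m) { return (m & 0x3) == 0x3; }

    // Returns true if this thread installed forward_mark, false if another thread
    // forwarded the object first (the caller then reads the winner's pointer).
    inline bool forward_to_atomic(std::atomic<mark_t>& mark, mark_t forward_mark) {
      mark_t old_mark = mark.load(std::memory_order_relaxed);
      while (!is_marked(old_mark)) {
        // On failure, compare_exchange_strong refreshes old_mark with the current
        // value, so a lost race is detected by is_marked() on the next iteration.
        if (mark.compare_exchange_strong(old_mark, forward_mark)) {
          return true;   // our CAS succeeded: the object is now forwarded by us
        }
      }
      return false;      // somebody else CAS'd in a forwarding pointer
    }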
@@ -1400,6 +1400,10 @@ class CommandLineFlags {
           "The exit of a JNI CS necessitating a scavenge also"              \
           " kicks off a bkgrd concurrent collection")                       \
                                                                             \
+  product(uintx, GCLockerEdenExpansionPercent, 5,                           \
+          "How much the GC can expand the eden by while the GC locker "     \
+          "is active (as a percentage)")                                    \
+                                                                            \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
                                                                             \
......