提交 adff88a9 编写于 作者: J johnc

Merge

...@@ -619,15 +619,19 @@ G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { ...@@ -619,15 +619,19 @@ G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
HeapWord* HeapWord*
G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint, bool at_safepoint,
bool do_dirtying) { bool do_dirtying,
bool can_expand) {
assert_heap_locked_or_at_safepoint(); assert_heap_locked_or_at_safepoint();
assert(_cur_alloc_region == NULL, assert(_cur_alloc_region == NULL,
"replace_cur_alloc_region_and_allocate() should only be called " "replace_cur_alloc_region_and_allocate() should only be called "
"after retiring the previous current alloc region"); "after retiring the previous current alloc region");
assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
"at_safepoint and is_at_safepoint() should be a tautology"); "at_safepoint and is_at_safepoint() should be a tautology");
assert(!can_expand || g1_policy()->can_expand_young_list(),
"we should not call this method with can_expand == true if "
"we are not allowed to expand the young gen");
if (!g1_policy()->is_young_list_full()) { if (can_expand || !g1_policy()->is_young_list_full()) {
if (!at_safepoint) { if (!at_safepoint) {
// The cleanup operation might update _summary_bytes_used // The cleanup operation might update _summary_bytes_used
// concurrently with this method. So, right now, if we don't // concurrently with this method. So, right now, if we don't
...@@ -738,11 +742,26 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) { ...@@ -738,11 +742,26 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
} }
if (GC_locker::is_active_and_needs_gc()) { if (GC_locker::is_active_and_needs_gc()) {
// We are locked out of GC because of the GC locker. Right now, // We are locked out of GC because of the GC locker. We can
// we'll just stall until the GC locker-induced GC // allocate a new region only if we can expand the young gen.
// completes. This will be fixed in the near future by extending
// the eden while waiting for the GC locker to schedule the GC if (g1_policy()->can_expand_young_list()) {
// (see CR 6994056). // Yes, we are allowed to expand the young gen. Let's try to
// allocate a new current alloc region.
HeapWord* result =
replace_cur_alloc_region_and_allocate(word_size,
false, /* at_safepoint */
true, /* do_dirtying */
true /* can_expand */);
if (result != NULL) {
assert_heap_not_locked();
return result;
}
}
// We could not expand the young gen further (or we could but we
// failed to allocate a new region). We'll stall until the GC
// locker forces a GC.
// If this thread is not in a jni critical section, we stall // If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and // the requestor until the critical section has cleared and
...@@ -950,7 +969,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, ...@@ -950,7 +969,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
"at this point we should have no cur alloc region"); "at this point we should have no cur alloc region");
return replace_cur_alloc_region_and_allocate(word_size, return replace_cur_alloc_region_and_allocate(word_size,
true, /* at_safepoint */ true, /* at_safepoint */
false /* do_dirtying */); false /* do_dirtying */,
false /* can_expand */);
} else { } else {
return attempt_allocation_humongous(word_size, return attempt_allocation_humongous(word_size,
true /* at_safepoint */); true /* at_safepoint */);
......
...@@ -496,12 +496,15 @@ protected: ...@@ -496,12 +496,15 @@ protected:
inline HeapWord* attempt_allocation(size_t word_size); inline HeapWord* attempt_allocation(size_t word_size);
// It assumes that the current alloc region has been retired and // It assumes that the current alloc region has been retired and
// tries to allocate a new one. If it's successful, it performs // tries to allocate a new one. If it's successful, it performs the
// the allocation out of the new current alloc region and updates // allocation out of the new current alloc region and updates
// _cur_alloc_region. // _cur_alloc_region. Normally, it would try to allocate a new
// region if the young gen is not full, unless can_expand is true in
// which case it would always try to allocate a new region.
HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint, bool at_safepoint,
bool do_dirtying); bool do_dirtying,
bool can_expand);
// The slow path when we are unable to allocate a new current alloc // The slow path when we are unable to allocate a new current alloc
// region to satisfy an allocation request (i.e., when // region to satisfy an allocation request (i.e., when
......
...@@ -119,8 +119,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size) { ...@@ -119,8 +119,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size) {
// Try to get a new region and allocate out of it // Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size, HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
false, /* at safepoint */ false, /* at_safepoint */
true /* do_dirtying */); true, /* do_dirtying */
false /* can_expand */);
if (result != NULL) { if (result != NULL) {
assert_heap_not_locked(); assert_heap_not_locked();
return result; return result;
......
...@@ -479,6 +479,7 @@ void G1CollectorPolicy::calculate_young_list_target_length() { ...@@ -479,6 +479,7 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
// region before we need to do a collection again. // region before we need to do a collection again.
size_t min_length = _g1->young_list()->length() + 1; size_t min_length = _g1->young_list()->length() + 1;
_young_list_target_length = MAX2(_young_list_target_length, min_length); _young_list_target_length = MAX2(_young_list_target_length, min_length);
calculate_max_gc_locker_expansion();
calculate_survivors_policy(); calculate_survivors_policy();
} }
...@@ -2301,6 +2302,21 @@ size_t G1CollectorPolicy::max_regions(int purpose) { ...@@ -2301,6 +2302,21 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
}; };
} }
void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
size_t expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
double perc = (double) GCLockerEdenExpansionPercent / 100.0;
double expansion_region_num_d = perc * (double) _young_list_target_length;
// We use ceiling so that if expansion_region_num_d is > 0.0 (but
// less than 1.0) we'll get 1.
expansion_region_num = (size_t) ceil(expansion_region_num_d);
} else {
assert(expansion_region_num == 0, "sanity");
}
_young_list_max_length = _young_list_target_length + expansion_region_num;
assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}
// Calculates survivor space parameters. // Calculates survivor space parameters.
void G1CollectorPolicy::calculate_survivors_policy() void G1CollectorPolicy::calculate_survivors_policy()
{ {
......
...@@ -196,6 +196,10 @@ protected: ...@@ -196,6 +196,10 @@ protected:
size_t _young_list_target_length; size_t _young_list_target_length;
size_t _young_list_fixed_length; size_t _young_list_fixed_length;
// The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length.
size_t _young_list_max_length;
size_t _young_cset_length; size_t _young_cset_length;
bool _last_young_gc_full; bool _last_young_gc_full;
...@@ -1113,13 +1117,22 @@ public: ...@@ -1113,13 +1117,22 @@ public:
bool is_young_list_full() { bool is_young_list_full() {
size_t young_list_length = _g1->young_list()->length(); size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length; size_t young_list_target_length = _young_list_target_length;
if (G1FixedEdenSize) { if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions; young_list_target_length -= _max_survivor_regions;
} }
return young_list_length >= young_list_target_length;
}
return young_list_length >= young_list_max_length; bool can_expand_young_list() {
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_max_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
return young_list_length < young_list_max_length;
} }
void update_region_num(bool young); void update_region_num(bool young);
bool in_young_gc_mode() { bool in_young_gc_mode() {
...@@ -1231,6 +1244,8 @@ public: ...@@ -1231,6 +1244,8 @@ public:
_survivors_age_table.merge_par(age_table); _survivors_age_table.merge_par(age_table);
} }
void calculate_max_gc_locker_expansion();
// Calculates survivor space parameters. // Calculates survivor space parameters.
void calculate_survivors_policy(); void calculate_survivors_policy();
......
...@@ -1403,6 +1403,10 @@ class CommandLineFlags { ...@@ -1403,6 +1403,10 @@ class CommandLineFlags {
"The exit of a JNI CS necessitating a scavenge also" \ "The exit of a JNI CS necessitating a scavenge also" \
" kicks off a bkgrd concurrent collection") \ " kicks off a bkgrd concurrent collection") \
\ \
product(uintx, GCLockerEdenExpansionPercent, 5, \
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
\
develop(bool, UseCMSAdaptiveFreeLists, true, \ develop(bool, UseCMSAdaptiveFreeLists, true, \
"Use Adaptive Free Lists in the CMS generation") \ "Use Adaptive Free Lists in the CMS generation") \
\ \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册