Commit 41dac17c authored by T tonyp

6974966: G1: unnecessary direct-to-old allocations

Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly into old regions are now banned outright. G1 now only allows allocations out of young regions (the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) no longer dirties all of its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" it just allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it performs the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object, it schedules an evacuation pause in case that reclaims enough regions for the humongous allocation to be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor region count + 1.
g) Lots of code tidy-up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
Parent 75e08dfa
......@@ -290,6 +290,63 @@ private:
// started is maintained in _total_full_collections in CollectedHeap.
volatile unsigned int _full_collections_completed;
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
#define heap_locking_asserts_err_msg(__extra_message) \
err_msg("%s : Heap_lock %slocked, %sat a safepoint", \
(__extra_message), \
(!Heap_lock->owned_by_self()) ? "NOT " : "", \
(!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
#define assert_heap_locked() \
do { \
assert(Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
} while (0)
#define assert_heap_locked_or_at_safepoint() \
do { \
assert(Heap_lock->owned_by_self() || \
SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
"should be at a safepoint")); \
} while (0)
#define assert_heap_locked_and_not_at_safepoint() \
do { \
assert(Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)
#define assert_heap_not_locked() \
do { \
assert(!Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
} while (0)
#define assert_heap_not_locked_and_not_at_safepoint() \
do { \
assert(!Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)
#define assert_at_safepoint() \
do { \
assert(SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be at a safepoint")); \
} while (0)
#define assert_not_at_safepoint() \
do { \
assert(!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be at a safepoint")); \
} while (0)
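// Illustrative sketch (not part of this changeset, hypothetical method):
// how the allocation paths below use these macros as entry/exit
// contracts -- hold the Heap_lock on entry outside a safepoint, release
// it only on the successful path, and keep holding it when returning NULL.
HeapWord* sketch_allocation_contract(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();  // pre-condition

  HeapWord* result = /* try the current alloc region */ NULL;
  if (result != NULL) {
    Heap_lock->unlock();
    assert_heap_not_locked();                 // post-condition on success
    return result;
  }

  assert_heap_locked();                       // post-condition on failure
  return NULL;
}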
protected:
// Returns "true" iff none of the gc alloc regions have any allocations
......@@ -329,31 +386,162 @@ protected:
// Attempt to allocate an object of the given (very large) "word_size".
// Returns "NULL" on failure.
virtual HeapWord* humongousObjAllocate(size_t word_size);
// If possible, allocate a block of the given word_size, else return "NULL".
// Returning NULL will trigger GC or heap expansion.
// These two methods have rather awkward pre- and
// post-conditions. If they are called outside a safepoint, then
// they assume that the caller is holding the heap lock. Upon return
// they release the heap lock, if they are returning a non-NULL
// value. attempt_allocation_slow() also dirties the cards of a
// newly-allocated young region after it releases the heap
// lock. This change in interface was the neatest way to achieve
// this card dirtying without affecting mem_allocate(), which is a
// more frequently called method. We tried two or three different
// approaches, but they were even more hacky.
HeapWord* attempt_allocation(size_t word_size,
bool permit_collection_pause = true);
HeapWord* attempt_allocation_slow(size_t word_size,
bool permit_collection_pause = true);
virtual HeapWord* humongous_obj_allocate(size_t word_size);
// The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime
// into the G1's allocation routines. They have the following
// assumptions:
//
// * They should both be called outside safepoints.
//
// * They should both be called without holding the Heap_lock.
//
// * All allocation requests for new TLABs should go to
// allocate_new_tlab().
//
// * All non-TLAB allocation requests should go to mem_allocate()
// and mem_allocate() should never be called with is_tlab == true.
//
// * If the GC locker is active we currently stall until we can
// allocate a new young region. This will be changed in the
// near future (see CR 6994056).
//
// * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If
// this fails, they will attempt to do an evacuation pause and
// retry the allocation.
//
// * If all allocation attempts fail, even after trying to schedule
// an evacuation pause, allocate_new_tlab() will return NULL,
// whereas mem_allocate() will attempt a heap expansion and/or
// schedule a Full GC.
//
// * We do not allow humongous-sized TLABs. So, allocate_new_tlab
// should never be called with word_size being humongous. All
// humongous allocation requests should go to mem_allocate() which
// will satisfy them with a special path.
virtual HeapWord* allocate_new_tlab(size_t word_size);
virtual HeapWord* mem_allocate(size_t word_size,
bool is_noref,
bool is_tlab, /* expected to be false */
bool* gc_overhead_limit_was_exceeded);
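// Illustrative sketch (not part of this changeset, hypothetical helper):
// how a caller is expected to route requests given the split described
// above -- TLAB refills go to allocate_new_tlab(), everything else
// (including humongous sizes) goes to mem_allocate() with is_tlab == false.
HeapWord* sketch_route_allocation(size_t word_size, bool for_tlab) {
  if (for_tlab) {
    assert(!isHumongous(word_size), "humongous TLABs are not allowed");
    return allocate_new_tlab(word_size);
  }
  bool gc_overhead_limit_was_exceeded = false;
  return mem_allocate(word_size,
                      false /* is_noref */,
                      false /* is_tlab */,
                      &gc_overhead_limit_was_exceeded);
}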
// The following methods, allocate_from_cur_allocation_region(),
// attempt_allocation(), replace_cur_alloc_region_and_allocate(),
// attempt_allocation_slow(), and attempt_allocation_humongous()
// have very awkward pre- and post-conditions with respect to
// locking:
//
// If they are called outside a safepoint they assume the caller
// holds the Heap_lock when it calls them. However, on exit they
// will release the Heap_lock if they return a non-NULL result, but
// keep holding the Heap_lock if they return a NULL result. The
// reason for this is that we need to dirty the cards that span
// allocated blocks on young regions to avoid having to take the
// slow path of the write barrier (for performance reasons we don't
// update RSets for references whose source is a young region, so we
// don't need to look at dirty cards on young regions). But, doing
// this card dirtying while holding the Heap_lock can be a
// scalability bottleneck, especially given that some allocation
// requests might be of non-trivial size (and the larger the region
// size is, the fewer allocation requests will be considered
// humongous, as the humongous size limit is a fraction of the
// region size). So, when one of these calls succeeds in allocating
// a block it does the card dirtying after it releases the Heap_lock
// which is why it will return without holding it.
//
// The above asymmetry is the reason why locking / unlocking is done
// explicitly (i.e., with Heap_lock->lock() and
// Heap_lock->unlock()) instead of using MutexLocker and
// MutexUnlocker objects. The latter would ensure that the lock is
// unlocked / re-locked at every possible exit out of the basic
// block. However, we only want that action to happen in selected
// places.
//
// Further, if the above methods are called during a safepoint, then
// naturally there's no assumption about the Heap_lock being held, nor
// any attempt to unlock it. The parameter at_safepoint
// indicates whether the call is made during a safepoint or not (as
// an optimization, to avoid reading the global flag with
// SafepointSynchronize::is_at_safepoint()).
//
// The methods share these parameters:
//
// * word_size : the size of the allocation request in words
// * at_safepoint : whether the call is done at a safepoint; this
// also determines whether a GC is permitted
// (at_safepoint == false) or not (at_safepoint == true)
// * do_dirtying : whether the method should dirty the allocated
// block before returning
//
// They all return either the address of the block, if they
// successfully manage to allocate it, or NULL.
// It tries to satisfy an allocation request out of the current
// allocating region, which is passed as a parameter. It assumes
// that the caller has checked that the current allocating region is
// not NULL. Given that the caller has to check the current
// allocating region for at least NULL, it might as well pass it as
// the first parameter so that the method doesn't have to read it
// from the _cur_alloc_region field again.
inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size);
// It attempts to allocate out of the current alloc region. If that
// fails, it retires the current alloc region (if there is one),
// tries to get a new one and retries the allocation.
inline HeapWord* attempt_allocation(size_t word_size);
// It assumes that the current alloc region has been retired and
// tries to allocate a new one. If it's successful, it performs
// the allocation out of the new current alloc region and updates
// _cur_alloc_region.
HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint,
bool do_dirtying);
// The slow path when we are unable to allocate a new current alloc
// region to satisfy an allocation request (i.e., when
// attempt_allocation() fails). It will try to do an evacuation
// pause, which might stall due to the GC locker, and retry the
// allocation attempt when appropriate.
HeapWord* attempt_allocation_slow(size_t word_size);
// The method that tries to satisfy a humongous allocation
// request. If it cannot satisfy it, it will try to do an evacuation
// pause to perhaps reclaim enough space to be able to satisfy the
// allocation request afterwards.
HeapWord* attempt_allocation_humongous(size_t word_size,
bool at_safepoint);
// It does the common work when we are retiring the current alloc region.
inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
// It retires the current alloc region, which is passed as a
// parameter (since, typically, the caller is already holding on to
// it). It sets _cur_alloc_region to NULL.
void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
// It attempts to do an allocation immediately before or after an
// evacuation pause and can only be called by the VM thread. It has
// slightly different assumptions than the ones before (i.e.,
// assumes that the current alloc region has been retired).
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_cur_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
// Allocate blocks during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
......@@ -370,12 +558,14 @@ protected:
void retire_alloc_region(HeapRegion* alloc_region, bool par);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references are cleared
// during the GC
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
// cleared during the GC
// - if explicit_gc is false, word_size describes the allocation that
// the GC should attempt (at least) to satisfy
void do_collection(bool explicit_gc,
// the GC should attempt (at least) to satisfy
// - it returns false if it is unable to do the collection due to the
// GC locker being active, true otherwise
bool do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size);
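// Illustrative sketch (not part of this changeset, hypothetical caller):
// reacting to the new boolean result of do_collection(). When it returns
// false, the GC locker was active and the collection could not be done;
// the caller would typically report failure and let the allocation path
// stall / retry, in line with the CR 6994056 note above.
bool sketch_collect_for_allocation(size_t word_size) {
  bool success = do_collection(false /* explicit_gc */,
                               false /* clear_all_soft_refs */,
                               word_size);
  if (!success) {
    // GC locker was active; nothing was collected, so there is no point
    // retrying the allocation until the locker clears.
  }
  return success;
}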
......@@ -391,13 +581,13 @@ protected:
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size);
HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
// Attempt to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
virtual HeapWord* expand_and_allocate(size_t word_size);
HeapWord* expand_and_allocate(size_t word_size);
public:
// Expand the garbage-first heap by at least the given size (in bytes!).
......@@ -478,21 +668,27 @@ protected:
void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS
// Do an incremental collection: identify a collection set, and evacuate
// its live objects elsewhere.
virtual void do_collection_pause();
// Schedule the VM operation that will do an evacuation pause to
// satisfy an allocation request of word_size. *succeeded will
// return whether the VM operation was successful (it did do an
// evacuation pause) or not (another thread beat us to it or the GC
// locker was active). Given that we should not be holding the
// Heap_lock when we enter this method, we will pass the
// gc_count_before (i.e., total_collections()) as a parameter since
// it has to be read while holding the Heap_lock. Currently, both
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
HeapWord* do_collection_pause(size_t word_size,
unsigned int gc_count_before,
bool* succeeded);
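// Illustrative sketch (not part of this changeset, hypothetical caller):
// the calling convention described above -- read gc_count_before while
// still holding the Heap_lock, release the lock, then schedule the pause
// and check both the returned result and *succeeded.
HeapWord* sketch_pause_and_allocate(size_t word_size) {
  assert_heap_locked();
  unsigned int gc_count_before = total_collections();
  Heap_lock->unlock();

  bool succeeded = false;
  HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                         &succeeded);
  if (result != NULL) {
    // The pause ran and also satisfied the allocation request.
    return result;
  }
  if (!succeeded) {
    // Another thread beat us to the pause or the GC locker was active;
    // the caller would typically retry.
  }
  return NULL;
}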
// The guts of the incremental collection pause, executed by the vm
// thread.
virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
// thread. It returns false if it is unable to do the collection due
// to the GC locker being active, true otherwise
bool do_collection_pause_at_safepoint(double target_pause_time_ms);
// Actually do the work of evacuating the collection set.
virtual void evacuate_collection_set();
// If this is an appropriate right time, do a collection pause.
// The "word_size" argument, if non-zero, indicates the size of an
// allocation request that is prompting this query.
void do_collection_pause_if_appropriate(size_t word_size);
void evacuate_collection_set();
// The g1 remembered set of the heap.
G1RemSet* _g1_rem_set;
......@@ -762,11 +958,6 @@ public:
#endif // PRODUCT
// These virtual functions do the actual allocation.
virtual HeapWord* mem_allocate(size_t word_size,
bool is_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
// Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.)
......@@ -1046,7 +1237,6 @@ public:
virtual bool supports_tlab_allocation() const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
virtual HeapWord* allocate_new_tlab(size_t word_size);
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
......@@ -1186,7 +1376,6 @@ public:
static G1CollectedHeap* heap();
void empty_young_list();
bool should_set_young_locked();
void set_region_short_lived_locked(HeapRegion* hr);
// add appropriate methods for any other surv rate groups
......@@ -1339,8 +1528,6 @@ public:
protected:
size_t _max_heap_capacity;
// debug_only(static void check_for_valid_allocation_state();)
public:
// Temporary: call to mark things unimplemented for the G1 heap (e.g.,
// MemoryService). In productization, we can make this assert false
......
......@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "utilities/taskqueue.hpp"
......@@ -58,37 +59,114 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
return r != NULL && r->in_collection_set();
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
bool permit_collection_pause) {
HeapWord* res = NULL;
assert( SafepointSynchronize::is_at_safepoint() ||
Heap_lock->owned_by_self(), "pre-condition of the call" );
// All humongous allocation requests should go through the slow path in
// attempt_allocation_slow().
if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
// If this allocation causes a region to become non empty,
// then we need to update our free_regions count.
if (_cur_alloc_region->is_empty()) {
res = _cur_alloc_region->allocate(word_size);
if (res != NULL)
_free_regions--;
} else {
res = _cur_alloc_region->allocate(word_size);
}
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(cur_alloc_region != NULL, "pre-condition of the method");
assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
"should not be used for humongous allocations");
assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
assert(!cur_alloc_region->is_empty(),
err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
cur_alloc_region->bottom(), cur_alloc_region->end()));
// This allocate method does BOT updates and we don't need them in
// the young generation. This will be fixed in the near future by
// CR 6994297.
HeapWord* result = cur_alloc_region->allocate(word_size);
if (result != NULL) {
assert(is_in(result), "result should be in the heap");
Heap_lock->unlock();
// Do the dirtying after we release the Heap_lock.
dirty_young_block(result, word_size);
return result;
}
assert_heap_locked();
return NULL;
}
if (res != NULL) {
if (!SafepointSynchronize::is_at_safepoint()) {
assert( Heap_lock->owned_by_self(), "invariant" );
Heap_lock->unlock();
}
return res;
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not be called "
"for humongous allocation requests");
HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
word_size);
if (result != NULL) {
assert_heap_not_locked();
return result;
}
assert_heap_locked();
// Since we couldn't successfully allocate into it, retire the
// current alloc region.
retire_cur_alloc_region(cur_alloc_region);
}
// attempt_allocation_slow will also unlock the heap lock when appropriate.
return attempt_allocation_slow(word_size, permit_collection_pause);
// Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
false, /* at safepoint */
true /* do_dirtying */);
if (result != NULL) {
assert_heap_not_locked();
return result;
}
assert_heap_locked();
return NULL;
}
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
assert_heap_locked_or_at_safepoint();
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
"pre-condition of the call");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
// The region is guaranteed to be young
g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
_summary_bytes_used += cur_alloc_region->used();
_cur_alloc_region = NULL;
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert_heap_not_locked();
// Assign the containing region to containing_hr so that we don't
// have to keep calling heap_region_containing_raw() in the
// asserts below.
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
assert(containing_hr != NULL && start != NULL && word_size > 0,
"pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");
assert(!containing_hr->isHumongous(), "it should not be humongous");
HeapWord* end = start + word_size;
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
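// Illustrative sketch (not part of this changeset): what "dirtying the
// cards" above amounts to on a classic one-byte-per-512-byte-card table.
// The names card_shift_sketch / dirty_block_sketch are made up; the real
// work is done by CardTableModRefBS::dirty(mr) as called above.
static const int card_shift_sketch = 9;   // 2^9 = 512 bytes per card

static void dirty_block_sketch(jbyte* card_table, char* heap_base,
                               char* start, size_t byte_size) {
  size_t first_card = (size_t)(start - heap_base) >> card_shift_sketch;
  size_t last_card  = (size_t)(start + byte_size - 1 - heap_base) >> card_shift_sketch;
  for (size_t i = first_card; i <= last_card; i++) {
    card_table[i] = 0;  // 0 == dirty; the post write barrier then has
                        // nothing to enqueue for objects on this block
  }
}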
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
......
......@@ -458,8 +458,8 @@ void G1CollectorPolicy::calculate_young_list_min_length() {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
double alloc_rate_ms = predict_alloc_rate_ms();
int min_regions = (int) ceil(alloc_rate_ms * when_ms);
int current_region_num = (int) _g1->young_list()->length();
size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
size_t current_region_num = _g1->young_list()->length();
_young_list_min_length = min_regions + current_region_num;
}
}
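// Worked example (made-up numbers, not part of this changeset): with a
// predicted alloc_rate_ms of 0.25 regions/ms and the MMU tracker allowing
// the next GC in when_ms = 40 ms, min_regions = ceil(0.25 * 40) = 10;
// with 5 regions already in the young list, _young_list_min_length = 15.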
......@@ -473,9 +473,12 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
_young_list_target_length = _young_list_fixed_length;
else
_young_list_target_length = _young_list_fixed_length / 2;
_young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
}
// Make sure we allow the application to allocate at least one
// region before we need to do a collection again.
size_t min_length = _g1->young_list()->length() + 1;
_young_list_target_length = MAX2(_young_list_target_length, min_length);
calculate_survivors_policy();
}
......@@ -568,7 +571,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
// we should have at least one region in the target young length
_young_list_target_length =
MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
final_young_length + _recorded_survivor_regions;
// let's keep an eye of how long we spend on this calculation
// right now, I assume that we'll print it when we need it; we
......@@ -617,8 +620,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
_young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
// we'll do the pause as soon as possible by choosing the minimum
_young_list_target_length =
MAX2(_young_list_min_length, (size_t) 1);
_young_list_target_length = _young_list_min_length;
}
_rs_lengths_prediction = rs_lengths;
......@@ -801,7 +803,7 @@ void G1CollectorPolicy::record_full_collection_end() {
_survivor_surv_rate_group->reset();
calculate_young_list_min_length();
calculate_young_list_target_length();
}
}
void G1CollectorPolicy::record_before_bytes(size_t bytes) {
_bytes_in_to_space_before_gc += bytes;
......@@ -824,9 +826,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
}
assert(_g1->used_regions() == _g1->recalculate_used_regions(),
"sanity");
assert(_g1->used() == _g1->recalculate_used(), "sanity");
assert(_g1->used() == _g1->recalculate_used(),
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
_g1->used(), _g1->recalculate_used()));
double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
_all_stop_world_times_ms->add(s_w_t_ms);
......@@ -2266,24 +2268,13 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
#endif // PRODUCT
}
bool
G1CollectorPolicy::should_add_next_region_to_young_list() {
assert(in_young_gc_mode(), "should be in young GC mode");
bool ret;
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
if (young_list_length < young_list_max_length) {
ret = true;
void
G1CollectorPolicy::update_region_num(bool young) {
if (young) {
++_region_num_young;
} else {
ret = false;
++_region_num_tenured;
}
return ret;
}
#ifndef PRODUCT
......@@ -2327,32 +2318,6 @@ void G1CollectorPolicy::calculate_survivors_policy()
}
}
bool
G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
word_size) {
assert(_g1->regions_accounted_for(), "Region leakage!");
double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
bool reached_target_length = young_list_length >= young_list_max_length;
if (in_young_gc_mode()) {
if (reached_target_length) {
assert( young_list_length > 0 && _g1->young_list()->length() > 0,
"invariant" );
return true;
}
} else {
guarantee( false, "should not reach here" );
}
return false;
}
#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
CollectionSetChooser* _chooser;
......
......@@ -993,11 +993,6 @@ public:
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);
// Returns "true" if this is a good time to do a collection pause.
// The "word_size" argument, if non-zero, indicates the size of an
// allocation request that is prompting this query.
virtual bool should_do_collection_pause(size_t word_size) = 0;
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
......@@ -1116,7 +1111,16 @@ public:
// do that for any other surv rate groups
}
bool should_add_next_region_to_young_list();
bool is_young_list_full() {
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
return young_list_length >= young_list_max_length;
}
void update_region_num(bool young);
bool in_young_gc_mode() {
return _in_young_gc_mode;
......@@ -1270,7 +1274,6 @@ public:
_collectionSetChooser = new CollectionSetChooser();
}
void record_collection_pause_end();
bool should_do_collection_pause(size_t word_size);
// This is not needed any more, after the CSet choosing code was
// changed to use the pause prediction work. But let's leave the
// hook in just in case.
......
......@@ -27,13 +27,22 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"
VM_G1CollectForAllocation::VM_G1CollectForAllocation(
unsigned int gc_count_before,
size_t word_size)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
guarantee(word_size > 0, "an allocation should always be requested");
}
void VM_G1CollectForAllocation::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_res = g1h->satisfy_failed_allocation(_size);
assert(g1h->is_in_or_null(_res), "result not in heap");
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
assert(_result == NULL || _pause_succeeded,
"if we get back a result, the pause should have succeeded");
}
void VM_G1CollectFull::doit() {
......@@ -43,6 +52,25 @@ void VM_G1CollectFull::doit() {
g1h->do_full_collection(false /* clear_all_soft_refs */);
}
VM_G1IncCollectionPause::VM_G1IncCollectionPause(
unsigned int gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
GCCause::Cause gc_cause)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
_full_collections_completed_before(0) {
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));
guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
"we can only request an allocation if the GC cause is for "
"an incremental GC pause");
_gc_cause = gc_cause;
}
void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
......@@ -51,6 +79,18 @@ void VM_G1IncCollectionPause::doit() {
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
"only a GC locker or a System.gc() induced GC should start a cycle");
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
// pause then we will consider this pause successful.
_pause_succeeded = true;
return;
}
}
GCCauseSetter x(g1h, _gc_cause);
if (_should_initiate_conc_mark) {
// It's safer to read full_collections_completed() here, given
......@@ -63,7 +103,16 @@ void VM_G1IncCollectionPause::doit() {
// will do so if one is not already in progress.
bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
}
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
_pause_succeeded =
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
}
}
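// Illustrative sketch (not part of this changeset, hypothetical caller):
// how the slow allocation path is expected to drive this operation --
// build the op with the allocation request, execute it on the VM thread,
// then read back the result and whether the pause actually ran. Taking
// the target pause time from MaxGCPauseMillis is an assumption here.
static HeapWord* sketch_schedule_pause(unsigned int gc_count_before,
                                       size_t word_size) {
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false /* should_initiate_conc_mark */,
                             (double) MaxGCPauseMillis,
                             GCCause::_g1_inc_collection_pause);
  VMThread::execute(&op);

  HeapWord* result = op.result();
  bool succeeded   = op.pause_succeeded();
  // result != NULL implies succeeded (asserted in doit() above); if the
  // pause did not run, the caller would typically retry or give up.
  return succeeded ? result : NULL;
}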
void VM_G1IncCollectionPause::doit_epilogue() {
......
......@@ -31,19 +31,33 @@
// VM_GC_Operation:
// - VM_CGC_Operation
// - VM_G1CollectFull
// - VM_G1CollectForAllocation
// - VM_G1IncCollectionPause
// - VM_G1PopRegionCollectionPause
// - VM_G1OperationWithAllocRequest
// - VM_G1CollectForAllocation
// - VM_G1IncCollectionPause
class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
protected:
size_t _word_size;
HeapWord* _result;
bool _pause_succeeded;
public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
size_t word_size)
: VM_GC_Operation(gc_count_before),
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; }
};
class VM_G1CollectFull: public VM_GC_Operation {
public:
public:
VM_G1CollectFull(unsigned int gc_count_before,
unsigned int full_gc_count_before,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before, full_gc_count_before) {
_gc_cause = cause;
}
~VM_G1CollectFull() {}
virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
virtual void doit();
virtual const char* name() const {
......@@ -51,45 +65,28 @@ class VM_G1CollectFull: public VM_GC_Operation {
}
};
class VM_G1CollectForAllocation: public VM_GC_Operation {
private:
HeapWord* _res;
size_t _size; // size of object to be allocated
public:
VM_G1CollectForAllocation(size_t size, int gc_count_before)
: VM_GC_Operation(gc_count_before) {
_size = size;
_res = NULL;
}
~VM_G1CollectForAllocation() {}
class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
public:
VM_G1CollectForAllocation(unsigned int gc_count_before,
size_t word_size);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
virtual const char* name() const {
return "garbage-first collection to satisfy allocation";
}
HeapWord* result() { return _res; }
};
class VM_G1IncCollectionPause: public VM_GC_Operation {
class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
private:
bool _should_initiate_conc_mark;
double _target_pause_time_ms;
bool _should_initiate_conc_mark;
double _target_pause_time_ms;
unsigned int _full_collections_completed_before;
public:
VM_G1IncCollectionPause(unsigned int gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before),
_full_collections_completed_before(0),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms) {
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));
_gc_cause = cause;
}
GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
virtual void doit();
virtual void doit_epilogue();
......@@ -103,14 +100,9 @@ public:
class VM_CGC_Operation: public VM_Operation {
VoidClosure* _cl;
const char* _printGCMessage;
public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) :
_cl(cl),
_printGCMessage(printGCMsg)
{}
~VM_CGC_Operation() {}
public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
: _cl(cl), _printGCMessage(printGCMsg) { }
virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
virtual void doit();
virtual bool doit_prologue();
......