Commit 1362ac76 authored by tonyp

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally

Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
Parent 54d5feb3
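To make the refactoring easier to follow before reading the diff, here is a minimal, self-contained C++ sketch of the allocation-region pattern the summary describes: a base class owns the active region, the per-interval region count, and the bump-pointer fast path, while subclasses only supply the "get a new region" and "retire the full region" callbacks, plus a bot_updates flag that the survivor variant turns off. All names and sizes in the sketch (SimpleRegion, SimpleAllocRegion, SurvivorRegionSketch, the 1024-word capacity) are illustrative assumptions, not HotSpot code; the real G1AllocRegion also handles parallel allocators and a dummy-region sentinel, which the sketch omits.

// Sketch only -- mirrors the shape of G1AllocRegion and its new subclasses,
// not the actual HotSpot implementation.
#include <cstddef>
#include <cstdio>
#include <vector>

struct SimpleRegion {                       // stand-in for HeapRegion
  static constexpr size_t capacity = 1024;  // words, illustrative only
  size_t top = 0;
  void* allocate(size_t word_size) {
    if (top + word_size > capacity) return nullptr;
    top += word_size;
    return &top;                            // dummy address for the sketch
  }
};

class SimpleAllocRegion {
  SimpleRegion* _alloc_region = nullptr;
  size_t _count = 0;                        // distinct regions used this interval
  const bool _bot_updates;                  // whether allocations update the BOT
protected:
  virtual SimpleRegion* allocate_new_region() = 0;
  virtual void retire_region(SimpleRegion* r) = 0;
public:
  explicit SimpleAllocRegion(bool bot_updates) : _bot_updates(bot_updates) {}
  virtual ~SimpleAllocRegion() {}

  void* attempt_allocation(size_t word_size) {
    if (_alloc_region != nullptr) {
      void* result = _alloc_region->allocate(word_size);
      if (result != nullptr) return result;   // fast path: bump-pointer alloc
      retire_region(_alloc_region);           // current region is full
    }
    _alloc_region = allocate_new_region();    // slow path: subclass callback
    if (_alloc_region == nullptr) return nullptr;
    _count += 1;
    return _alloc_region->allocate(word_size);
  }
  size_t count() const { return _count; }
  bool bot_updates() const { return _bot_updates; }
};

// Survivor allocations skip BOT updates, matching the false /* bot_updates */
// flag the commit passes to the SurvivorGCAllocRegion constructor.
class SurvivorRegionSketch : public SimpleAllocRegion {
  std::vector<SimpleRegion> _pool;
protected:
  SimpleRegion* allocate_new_region() override {
    _pool.emplace_back();
    return &_pool.back();
  }
  void retire_region(SimpleRegion*) override { /* e.g. dirty its cards lazily */ }
public:
  SurvivorRegionSketch() : SimpleAllocRegion(false /* bot_updates */) {
    _pool.reserve(16);                      // keep region pointers stable
  }
};

int main() {
  SurvivorRegionSketch survivor;
  for (int i = 0; i < 10; i++) survivor.attempt_allocation(300);
  std::printf("regions used: %zu\n", survivor.count());  // 3 allocs fit per region -> 4
  return 0;
}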
......@@ -129,6 +129,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
// region in _alloc_region. This is the reason why an active region
// can never be empty.
_alloc_region = new_alloc_region;
_count += 1;
trace("region allocation successful");
return result;
} else {
......@@ -139,8 +140,8 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
}
void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
_name, message, BOOL_TO_STR(_bot_updates),
msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
_name, message, _count, BOOL_TO_STR(_bot_updates),
_alloc_region, _used_bytes_before);
}
......@@ -148,16 +149,34 @@ void G1AllocRegion::init() {
trace("initializing");
assert(_alloc_region == NULL && _used_bytes_before == 0,
ar_ext_msg(this, "pre-condition"));
assert(_dummy_region != NULL, "should have been set");
assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
_alloc_region = _dummy_region;
_count = 0;
trace("initialized");
}
void G1AllocRegion::set(HeapRegion* alloc_region) {
trace("setting");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
assert(alloc_region != NULL && !alloc_region->is_empty(),
ar_ext_msg(this, "pre-condition"));
assert(_alloc_region == _dummy_region &&
_used_bytes_before == 0 && _count == 0,
ar_ext_msg(this, "pre-condition"));
_used_bytes_before = alloc_region->used();
_alloc_region = alloc_region;
_count += 1;
trace("set");
}
HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
retire(false /* fill_up */);
assert(_alloc_region == _dummy_region, "post-condition of retire()");
assert(_alloc_region == _dummy_region,
ar_ext_msg(this, "post-condition of retire()"));
_alloc_region = NULL;
trace("released");
return (alloc_region == _dummy_region) ? NULL : alloc_region;
......@@ -196,7 +215,8 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
jio_snprintf(rest_buffer, buffer_length, "");
}
tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
_name, _count, hr_buffer, str, rest_buffer);
}
}
#endif // G1_ALLOC_REGION_TRACING
......@@ -204,5 +224,5 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
_alloc_region(NULL), _used_bytes_before(0) { }
_alloc_region(NULL), _count(0), _used_bytes_before(0) { }
......@@ -36,7 +36,7 @@ class ar_ext_msg;
// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired it replaced with a new one. The
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.
......@@ -57,13 +57,22 @@ private:
// correct use of init() and release()).
HeapRegion* _alloc_region;
// It keeps track of the number of distinct regions that are used
// for allocation in the active interval of this object, i.e.,
// between a call to init() and a call to release(). The count
// mostly includes regions that are freshly allocated, as well as
// the region that is re-used using the set() method. This count can
// be used in any heuristics that might want to bound how many
// distinct regions this object can use during an active interval.
size_t _count;
// When we set up a new active region we save its used bytes in this
// field so that, when we retire it, we can calculate how much space
// we allocated in it.
size_t _used_bytes_before;
// Specifies whether the allocate calls will do BOT updates or not.
bool _bot_updates;
// When true, indicates that allocate calls should do BOT updates.
const bool _bot_updates;
// Useful for debugging and tracing.
const char* _name;
......@@ -127,6 +136,8 @@ public:
return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
}
size_t count() { return _count; }
// The following two are the building blocks for the allocation method.
// First-level allocation: Should be called without holding a
......@@ -153,6 +164,12 @@ public:
// Should be called before we start using this object.
void init();
// This can be used to set the active region to a specific
// region. (Use Example: we try to retain the last old GC alloc
// region that we've used during a GC and we can use set() to
// re-instate it at the beginning of the next GC.)
void set(HeapRegion* alloc_region);
// Should be called when we want to release the active region which
// is returned after it's been retired.
HeapRegion* release();
......
......@@ -155,6 +155,24 @@ public:
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
SurvivorGCAllocRegion()
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
class OldGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
friend class VM_G1CollectForAllocation;
......@@ -163,6 +181,8 @@ class G1CollectedHeap : public SharedHeap {
friend class VM_G1IncCollectionPause;
friend class VMStructs;
friend class MutatorAllocRegion;
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
// Closures used in implementation.
friend class G1ParCopyHelper;
......@@ -225,30 +245,33 @@ private:
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
// The last old region we allocated to during the last GC.
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
// It releases the mutator alloc region.
void release_mutator_alloc_region();
void abandon_gc_alloc_regions();
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions();
// The to-space memory regions into which objects are being copied during
// a GC.
HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
size_t _gc_alloc_region_counts[GCAllocPurposeCount];
// These are the regions, one per GCAllocPurpose, that are half-full
// at the end of a collection and that we want to reuse during the
// next collection.
HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
// This specifies whether we will keep the last half-full region at
// the end of a collection so that it can be reused during the next
// collection (this is specified per GCAllocPurpose)
bool _retain_gc_alloc_region[GCAllocPurposeCount];
// A list of the regions that have been set to be alloc regions in the
// current collection.
HeapRegion* _gc_alloc_region_list;
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions();
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.
void abandon_gc_alloc_regions();
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
......@@ -256,20 +279,6 @@ private:
// Determines PLAB size for a particular allocation purpose.
static size_t desired_plab_sz(GCAllocPurpose purpose);
// When called by par thread, requires the FreeList_lock to be held.
void push_gc_alloc_region(HeapRegion* hr);
// This should only be called single-threaded. Undeclares all GC alloc
// regions.
void forget_alloc_region_list();
// Should be used to set an alloc region, because there's other
// associated bookkeeping.
void set_gc_alloc_region(int purpose, HeapRegion* r);
// Check well-formedness of alloc region list.
bool check_gc_alloc_regions();
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
......@@ -387,12 +396,6 @@ private:
protected:
// Returns "true" iff none of the gc alloc regions have any allocations
// since the last call to "save_marks".
bool all_alloc_regions_no_allocs_since_save_marks();
// Perform finalization stuff on all allocation regions.
void retire_all_alloc_regions();
// The young region list.
YoungList* _young_list;
......@@ -412,11 +415,6 @@ protected:
// request.
HeapRegion* new_region(size_t word_size, bool do_expand);
// Try to allocate a new region to be used for allocation by
// a GC thread. It will try to expand the heap if no region is
// available.
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the
......@@ -524,16 +522,25 @@ protected:
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
// Retires an allocation region when it is full or at the end of a
// GC pause.
void retire_alloc_region(HeapRegion* alloc_region, bool par);
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size);
// These two methods are the "callbacks" from the G1AllocRegion class.
// These methods are the "callbacks" from the G1AllocRegion class.
// For mutator alloc regions.
HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
void retire_mutator_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes);
// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
GCAllocPurpose ap);
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, GCAllocPurpose ap);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
......@@ -727,9 +734,6 @@ protected:
void g1_process_weak_roots(OopClosure* root_closure,
OopClosure* non_root_closure);
// Invoke "save_marks" on all heap regions.
void save_marks();
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
......@@ -821,24 +825,6 @@ protected:
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
// Ensure that the relevant gc_alloc regions are set.
void get_gc_alloc_regions();
// We're done with GC alloc regions. We are going to tear down the
// gc alloc list and remove the gc alloc tag from all the regions on
// that list. However, we will also retain the last (i.e., the one
// that is half-full) GC alloc region, per GCAllocPurpose, for
// possible reuse during the next collection, provided
// _retain_gc_alloc_region[] indicates that it should be the
// case. Said regions are kept in the _retained_gc_alloc_regions[]
// array. If the parameter totally is set, we will not retain any
// regions, irrespective of what _retain_gc_alloc_region[]
// indicates.
void release_gc_alloc_regions(bool totally);
#ifndef PRODUCT
// Useful for debugging.
void print_gc_alloc_regions();
#endif // !PRODUCT
// Instance of the concurrent mark is_alive closure for embedding
// into the reference processor as the is_alive_non_header. This
// prevents unnecessary additions to the discovered lists during
......@@ -947,9 +933,6 @@ public:
// result might be a bit inaccurate.
size_t used_unlocked() const;
size_t recalculate_used() const;
#ifndef PRODUCT
size_t recalculate_used_regions() const;
#endif // PRODUCT
// These virtual functions do the actual allocation.
// Some heaps may offer a contiguous region for shared non-blocking
......@@ -1109,9 +1092,6 @@ public:
virtual bool is_in_closed_subset(const void* p) const;
// Dirty card table entries covering a list of young regions.
void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
// This resets the card table to all zeros. It is used after
// a collection pause which used the card table to claim cards.
void cleanUpCardTable();
......
......@@ -77,6 +77,38 @@ G1CollectedHeap::attempt_allocation(size_t word_size,
return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
word_size) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
......
......@@ -859,14 +859,6 @@ void G1CollectorPolicy::record_full_collection_end() {
calculate_young_list_target_length();
}
void G1CollectorPolicy::record_before_bytes(size_t bytes) {
_bytes_in_to_space_before_gc += bytes;
}
void G1CollectorPolicy::record_after_bytes(size_t bytes) {
_bytes_in_to_space_after_gc += bytes;
}
void G1CollectorPolicy::record_stop_world_start() {
_stop_world_start = os::elapsedTime();
}
......@@ -894,9 +886,8 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_pending_cards = _g1->pending_card_num();
_max_pending_cards = _g1->max_pending_card_num();
_bytes_in_to_space_before_gc = 0;
_bytes_in_to_space_after_gc = 0;
_bytes_in_collection_set_before_gc = 0;
_bytes_copied_during_gc = 0;
YoungList* young_list = _g1->young_list();
_eden_bytes_before_gc = young_list->eden_used_bytes();
......@@ -1578,7 +1569,7 @@ void G1CollectorPolicy::record_collection_pause_end() {
double survival_ratio = 0.0;
if (_bytes_in_collection_set_before_gc > 0) {
survival_ratio = (double) bytes_in_to_space_during_gc() /
survival_ratio = (double) _bytes_copied_during_gc /
(double) _bytes_in_collection_set_before_gc;
}
......
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -585,13 +585,9 @@ protected:
int _last_update_rs_processed_buffers;
double _last_pause_time_ms;
size_t _bytes_in_to_space_before_gc;
size_t _bytes_in_to_space_after_gc;
size_t bytes_in_to_space_during_gc() {
return
_bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
}
size_t _bytes_in_collection_set_before_gc;
size_t _bytes_copied_during_gc;
// Used to count used bytes in CS.
friend class CountCSClosure;
......@@ -805,10 +801,6 @@ public:
return _bytes_in_collection_set_before_gc;
}
size_t bytes_in_to_space() {
return bytes_in_to_space_during_gc();
}
unsigned calc_gc_alloc_time_stamp() {
return _all_pause_times_ms->num() + 1;
}
......@@ -977,9 +969,16 @@ public:
}
#endif
// Record the fact that "bytes" bytes allocated in a region.
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);
// Record how much space we copied during a GC. This is typically
// called when a GC alloc region is being retired.
void record_bytes_copied_during_gc(size_t bytes) {
_bytes_copied_during_gc += bytes;
}
// The amount of space we copied during a GC.
size_t bytes_copied_during_gc() {
return _bytes_copied_during_gc;
}
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
......@@ -1193,10 +1192,6 @@ public:
return purpose == GCAllocForSurvived;
}
inline GCAllocPurpose alternative_purpose(int purpose) {
return GCAllocForTenured;
}
static const size_t REGIONS_UNLIMITED = ~(size_t)0;
size_t max_regions(int purpose);
......
......@@ -352,7 +352,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
"we should have already filtered out humongous regions");
_in_collection_set = false;
_is_gc_alloc_region = false;
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
......@@ -486,7 +485,7 @@ HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
: G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false), _is_gc_alloc_region(false),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
......@@ -716,8 +715,6 @@ void HeapRegion::print_on(outputStream* st) const {
}
if (in_collection_set())
st->print(" CS");
else if (is_gc_alloc_region())
st->print(" A ");
else
st->print(" ");
if (is_young())
......
......@@ -251,10 +251,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// True iff the region is in current collection_set.
bool _in_collection_set;
// Is this or has it been an allocation region in the current collection
// pause.
bool _is_gc_alloc_region;
// True iff an attempt to evacuate an object in the region failed.
bool _evacuation_failed;
......@@ -497,27 +493,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}
// True iff it is or has been an allocation region in the current
// collection pause.
bool is_gc_alloc_region() const {
return _is_gc_alloc_region;
}
void set_is_gc_alloc_region(bool b) {
_is_gc_alloc_region = b;
}
HeapRegion* next_gc_alloc_region() {
assert(is_gc_alloc_region(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->is_gc_alloc_region(),
"Malformed CS.");
return _next_in_special_set;
}
void set_next_gc_alloc_region(HeapRegion* r) {
assert(is_gc_alloc_region(), "should only invoke on member of CS.");
assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
_next_in_special_set = r;
}
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next field used to link regions into
......
......@@ -364,7 +364,10 @@ public:
PosParPRT** next_addr() { return &_next; }
bool should_expand(int tid) {
return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
// Given that we now defer RSet updates for after a GC we don't
// really need to expand the tables any more. This code should be
// cleaned up in the future (see CR 6921087).
return false;
}
void par_expand() {
......