Commit 10b871c9 authored by tonyp

7023069: G1: Introduce symmetric locking in the slow allocation path

7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads
7018286: G1: humongous allocation attempts should take the GC locker into account
Summary: First, this change replaces the asymmetric locking scheme in the G1 slow allocation path with a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future.
Reviewed-by: stefank, brutisso, johnc
Parent c8a9f198
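
The heart of the change is the symmetric two-level scheme used below by G1AllocRegion and G1CollectedHeap: the first level attempts a lock-free, CAS-based allocation out of the active region; if that fails, the caller takes the Heap_lock itself and retries under the lock, retiring and replacing the region as needed. The following is a minimal, self-contained sketch of that shape only, not HotSpot code; the Region/AllocRegion types, new_region(), and the std::atomic/std::mutex machinery are illustrative stand-ins.

```cpp
// Simplified model of the symmetric slow allocation path: the same caller
// that takes the lock holds it across the locked retry.
#include <atomic>
#include <cstddef>
#include <mutex>

struct Region {
  char*              base;
  std::atomic<char*> top;
  char*              end;
  Region(char* b, size_t bytes) : base(b), top(b), end(b + bytes) {}

  // MT-safe bump-pointer allocation, analogous to par_allocate().
  void* par_allocate(size_t bytes) {
    char* cur = top.load(std::memory_order_relaxed);
    while (cur + bytes <= end) {
      // compare_exchange_weak refreshes 'cur' on failure, so we just retry.
      if (top.compare_exchange_weak(cur, cur + bytes)) {
        return cur;
      }
    }
    return nullptr;  // region is full (or too full for this request)
  }
};

Region* new_region() {  // toy stand-in for allocating a fresh heap region
  const size_t region_bytes = 1 << 20;
  char* mem = new char[region_bytes];
  return new Region(mem, region_bytes);
}

class AllocRegion {
  Region* _active;  // never nullptr once initialized (cf. _dummy_region)
public:
  AllocRegion() : _active(new_region()) {}

  // First level: lock-free attempt; called without holding the lock.
  void* attempt_allocation(size_t bytes) {
    return _active->par_allocate(bytes);
  }

  // Second level: the caller must already hold the lock.
  void* attempt_allocation_locked(size_t bytes) {
    // Redo the lock-free attempt first: another thread may have replaced
    // the active region while we were waiting for the lock.
    if (void* p = attempt_allocation(bytes)) {
      return p;
    }
    _active = new_region();  // retire + replace (old region's fate elided)
    return _active->par_allocate(bytes);
  }
};

std::mutex heap_lock;

void* mutator_allocate(AllocRegion& ar, size_t bytes) {
  if (void* p = ar.attempt_allocation(bytes)) {
    return p;  // fast path: no lock taken
  }
  std::lock_guard<std::mutex> guard(heap_lock);  // slow path: symmetric locking
  return ar.attempt_allocation_locked(bytes);
}
```
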
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
assert(_dummy_region == NULL, "should be set once");
assert(dummy_region != NULL, "pre-condition");
assert(dummy_region->free() == 0, "pre-condition");
// Make sure that any allocation attempt on this region will fail
// and will not trigger any asserts.
assert(allocate(dummy_region, 1, false) == NULL, "should fail");
assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
assert(allocate(dummy_region, 1, true) == NULL, "should fail");
assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");
_g1h = g1h;
_dummy_region = dummy_region;
}
void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
bool bot_updates) {
assert(alloc_region != NULL && alloc_region != _dummy_region,
"pre-condition");
// Other threads might still be trying to allocate using a CAS out
// of the region we are trying to retire, as they can do so without
// holding the lock. So, we first have to make sure that no one else
// can allocate out of it by doing a maximal allocation. Even if our
// CAS attempt fails a few times, we'll succeed sooner or later
// given that failed CAS attempts mean that the region is getting
// close to being full.
size_t free_word_size = alloc_region->free() / HeapWordSize;
// This is the minimum free chunk we can turn into a dummy
// object. If the free space falls below this, then no one can
// allocate in this region anyway (all allocation requests will be
// of a size larger than this) so we won't have to perform the dummy
// allocation.
size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
while (free_word_size >= min_word_size_to_fill) {
HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
if (dummy != NULL) {
// If the allocation was successful we should fill in the space.
CollectedHeap::fill_with_object(dummy, free_word_size);
alloc_region->set_pre_dummy_top(dummy);
break;
}
free_word_size = alloc_region->free() / HeapWordSize;
// It's also possible that someone else beats us to the
// allocation and fills up the region. In that case, we can
// just get out of the loop.
}
assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
"post-condition");
}
void G1AllocRegion::retire(bool fill_up) {
assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
trace("retiring");
HeapRegion* alloc_region = _alloc_region;
if (alloc_region != _dummy_region) {
// We never have to check whether the active region is empty or not,
// and potentially free it if it is, given that it's guaranteed that
// it will never be empty.
assert(!alloc_region->is_empty(),
ar_ext_msg(this, "the alloc region should never be empty"));
if (fill_up) {
fill_up_remaining_space(alloc_region, _bot_updates);
}
assert(alloc_region->used() >= _used_bytes_before,
ar_ext_msg(this, "invariant"));
size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
retire_region(alloc_region, allocated_bytes);
_used_bytes_before = 0;
_alloc_region = _dummy_region;
}
trace("retired");
}
HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
bool force) {
assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
trace("attempting region allocation");
HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
if (new_alloc_region != NULL) {
new_alloc_region->reset_pre_dummy_top();
// Need to do this before the allocation
_used_bytes_before = new_alloc_region->used();
HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
assert(result != NULL, ar_ext_msg(this, "the allocation should have succeeded"));
OrderAccess::storestore();
// Note that we first perform the allocation and then we store the
// region in _alloc_region. This is the reason why an active region
// can never be empty.
_alloc_region = new_alloc_region;
trace("region allocation successful");
return result;
} else {
trace("region allocation failed");
return NULL;
}
ShouldNotReachHere();
}
void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
_name, message, BOOL_TO_STR(_bot_updates),
_alloc_region, _used_bytes_before);
}
void G1AllocRegion::init() {
trace("initializing");
assert(_alloc_region == NULL && _used_bytes_before == 0,
ar_ext_msg(this, "pre-condition"));
assert(_dummy_region != NULL, "should have been set");
_alloc_region = _dummy_region;
trace("initialized");
}
HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
retire(false /* fill_up */);
assert(_alloc_region == _dummy_region, "post-condition of retire()");
_alloc_region = NULL;
trace("released");
return (alloc_region == _dummy_region) ? NULL : alloc_region;
}
#if G1_ALLOC_REGION_TRACING
void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
// All the calls to trace that set either just the size or the size
// and the result are considered part of level 2 tracing and are
// skipped during level 1 tracing.
if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
const size_t buffer_length = 128;
char hr_buffer[buffer_length];
char rest_buffer[buffer_length];
HeapRegion* alloc_region = _alloc_region;
if (alloc_region == NULL) {
jio_snprintf(hr_buffer, buffer_length, "NULL");
} else if (alloc_region == _dummy_region) {
jio_snprintf(hr_buffer, buffer_length, "DUMMY");
} else {
jio_snprintf(hr_buffer, buffer_length,
HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
}
if (G1_ALLOC_REGION_TRACING > 1) {
if (result != NULL) {
jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
word_size, result);
} else if (word_size != 0) {
jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
} else {
jio_snprintf(rest_buffer, buffer_length, "");
}
} else {
jio_snprintf(rest_buffer, buffer_length, "");
}
tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
}
}
#endif // G1_ALLOC_REGION_TRACING
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
_alloc_region(NULL), _used_bytes_before(0) { }
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
#include "gc_implementation/g1/heapRegion.hpp"
class G1CollectedHeap;
// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
#define G1_ALLOC_REGION_TRACING 0
class ar_ext_msg;
// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.
class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
friend class ar_ext_msg;
private:
// The active allocating region we are currently allocating out
// of. The invariant is that if this object is initialized (i.e.,
// init() has been called and release() has not) then _alloc_region
// is either an active allocating region or the dummy region (i.e.,
// it can never be NULL) and this object can be used to satisfy
// allocation requests. If this object is not initialized
// (i.e. init() has not been called or release() has been called)
// then _alloc_region is NULL and this object should not be used to
// satisfy allocation requests (it was done this way to force the
// correct use of init() and release()).
HeapRegion* _alloc_region;
// When we set up a new active region we save its used bytes in this
// field so that, when we retire it, we can calculate how much space
// we allocated in it.
size_t _used_bytes_before;
// Specifies whether the allocate calls will do BOT updates or not.
bool _bot_updates;
// Useful for debugging and tracing.
const char* _name;
// A dummy region (i.e., it's been allocated specially for this
// purpose and it is not part of the heap) that is full (i.e., top()
// == end()). When we don't have a valid active region we make
// _alloc_region point to this. This allows us to skip checking
// whether the _alloc_region is NULL or not.
static HeapRegion* _dummy_region;
// Some of the methods below take a bot_updates parameter. Its value
// should be the same as the _bot_updates field. The idea is that
// the parameter will be a constant for a particular alloc region
// and, given that these methods will be hopefully inlined, the
// compiler should compile out the test.
// Perform a non-MT-safe allocation out of the given region.
static inline HeapWord* allocate(HeapRegion* alloc_region,
size_t word_size,
bool bot_updates);
// Perform a MT-safe allocation out of the given region.
static inline HeapWord* par_allocate(HeapRegion* alloc_region,
size_t word_size,
bool bot_updates);
// Ensure that the region passed as a parameter has been filled up
// so that no one else can allocate out of it any more.
static void fill_up_remaining_space(HeapRegion* alloc_region,
bool bot_updates);
// Retire the active allocating region. If fill_up is true then make
// sure that the region is full before we retire it so that no one
// else can allocate out of it.
void retire(bool fill_up);
// Allocate a new active region and use it to perform a word_size
// allocation. The force parameter will be passed on to
// G1CollectedHeap::allocate_new_alloc_region() and tells it to try
// to allocate a new region even if the max has been reached.
HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);
void fill_in_ext_msg(ar_ext_msg* msg, const char* message);
protected:
// For convenience as subclasses use it.
static G1CollectedHeap* _g1h;
virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
virtual void retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) = 0;
G1AllocRegion(const char* name, bool bot_updates);
public:
static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
HeapRegion* get() const {
// Make sure that the dummy region does not escape this class.
return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
}
// The following two are the building blocks for the allocation method.
// First-level allocation: Should be called without holding a
// lock. It will try to allocate lock-free out of the active region,
// or return NULL if it was unable to.
inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates);
// Second-level allocation: Should be called while holding a
// lock. It will try to first allocate lock-free out of the active
// region or, if it's unable to, it will try to replace the active
// alloc region with a new one. We require that the caller takes the
// appropriate lock before calling this so that it is easier to make
// it conform to its locking protocol.
inline HeapWord* attempt_allocation_locked(size_t word_size,
bool bot_updates);
// Should be called to allocate a new region even if the max of this
// type of regions has been reached. Should only be called if other
// allocation attempts have failed and we are not holding a valid
// active region.
inline HeapWord* attempt_allocation_force(size_t word_size,
bool bot_updates);
// Should be called before we start using this object.
void init();
// Should be called when we want to release the active region which
// is returned after it's been retired.
HeapRegion* release();
#if G1_ALLOC_REGION_TRACING
void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
#else // G1_ALLOC_REGION_TRACING
void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { }
#endif // G1_ALLOC_REGION_TRACING
};
class ar_ext_msg : public err_msg {
public:
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") {
alloc_region->fill_in_ext_msg(this, message);
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
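
The class above is meant to be specialized per use: a subclass supplies allocate_new_region() and retire_region(), and G1AllocRegion drives them from its slow path (the MutatorAllocRegion subclass further down in this change is the first such user). Below is a hedged sketch of that callback structure only; none of these Toy* types are HotSpot's, and the callback bodies are illustrative stubs.

```cpp
// Sketch of the subclassing/callback shape used by G1AllocRegion.
#include <cstddef>
#include <cstdio>

struct ToyRegion { size_t used_bytes = 0; };

class ToyAllocRegion {
  ToyRegion*  _alloc_region = nullptr;
  size_t      _used_bytes_before = 0;
  const char* _name;

protected:
  // The two "callbacks" a concrete alloc region must provide.
  virtual ToyRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(ToyRegion* r, size_t allocated_bytes) = 0;

public:
  explicit ToyAllocRegion(const char* name) : _name(name) {}
  virtual ~ToyAllocRegion() {}

  // Driven from the slow path: retire the old region, install a new one.
  ToyRegion* replace_region(size_t word_size) {
    std::printf("[%s] replacing active region\n", _name);
    if (_alloc_region != nullptr) {
      retire_region(_alloc_region,
                    _alloc_region->used_bytes - _used_bytes_before);
    }
    _alloc_region = allocate_new_region(word_size, false /* force */);
    _used_bytes_before =
        (_alloc_region != nullptr) ? _alloc_region->used_bytes : 0;
    return _alloc_region;
  }
};

// Analogous to MutatorAllocRegion: only the callbacks are specialized.
class ToyMutatorAllocRegion : public ToyAllocRegion {
protected:
  virtual ToyRegion* allocate_new_region(size_t /*word_size*/, bool /*force*/) {
    return new ToyRegion();  // stand-in for new_mutator_alloc_region()
  }
  virtual void retire_region(ToyRegion* r, size_t allocated_bytes) {
    std::printf("retiring region, %zu bytes allocated\n", allocated_bytes);
    delete r;                // stand-in for retire_mutator_alloc_region()
  }
public:
  ToyMutatorAllocRegion() : ToyAllocRegion("Toy Mutator Alloc Region") {}
};
```
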
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
#include "gc_implementation/g1/g1AllocRegion.hpp"
inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
size_t word_size,
bool bot_updates) {
assert(alloc_region != NULL, err_msg("pre-condition"));
if (!bot_updates) {
return alloc_region->allocate_no_bot_updates(word_size);
} else {
return alloc_region->allocate(word_size);
}
}
inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
size_t word_size,
bool bot_updates) {
assert(alloc_region != NULL, err_msg("pre-condition"));
assert(!alloc_region->is_empty(), err_msg("pre-condition"));
if (!bot_updates) {
return alloc_region->par_allocate_no_bot_updates(word_size);
} else {
return alloc_region->par_allocate(word_size);
}
}
inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
bool bot_updates) {
assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
HeapRegion* alloc_region = _alloc_region;
assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
HeapWord* result = par_allocate(alloc_region, word_size, bot_updates);
if (result != NULL) {
trace("alloc", word_size, result);
return result;
}
trace("alloc failed", word_size);
return NULL;
}
inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
bool bot_updates) {
// First we have to redo the allocation, assuming we're holding the
// appropriate lock, in case another thread changed the region while
// we were waiting to get the lock.
HeapWord* result = attempt_allocation(word_size, bot_updates);
if (result != NULL) {
return result;
}
retire(true /* fill_up */);
result = new_alloc_region_and_allocate(word_size, false /* force */);
if (result != NULL) {
trace("alloc locked (second attempt)", word_size, result);
return result;
}
trace("alloc locked failed", word_size);
return NULL;
}
inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
bool bot_updates) {
assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
trace("forcing alloc");
HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
if (result != NULL) {
trace("alloc forced", word_size, result);
return result;
}
trace("alloc forced failed", word_size);
return NULL;
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
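
The bot_updates branch in allocate()/par_allocate() above is written so that, as the comment in the .hpp notes, each call site passes a constant and the inlined test folds away. A small illustration of the idiom follows; the functions are toys, not the G1 ones.

```cpp
// Toy illustration: with inlining and a literal 'bot_updates' argument,
// the branch below is resolved at compile time and disappears.
#include <cstddef>

inline char* alloc_no_bot_updates(size_t)   { return nullptr; }  // stand-ins
inline char* alloc_with_bot_updates(size_t) { return nullptr; }

inline char* allocate(size_t word_size, bool bot_updates) {
  if (!bot_updates) {
    return alloc_no_bot_updates(word_size);
  } else {
    return alloc_with_bot_updates(word_size);
  }
}

char* mutator_path(size_t word_size) {
  // Constant argument: after inlining this compiles to a direct call.
  return allocate(word_size, false /* bot_updates */);
}

char* gc_path(size_t word_size) {
  return allocate(word_size, true /* bot_updates */);
}
```
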
......@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
......@@ -128,6 +129,15 @@ public:
void print();
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
MutatorAllocRegion()
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
friend class VM_G1CollectForAllocation;
......@@ -135,6 +145,7 @@ class G1CollectedHeap : public SharedHeap {
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
friend class VMStructs;
friend class MutatorAllocRegion;
// Closures used in implementation.
friend class G1ParCopyHelper;
......@@ -197,12 +208,15 @@ private:
// The sequence of all heap regions in the heap.
HeapRegionSeq* _hrs;
// The region from which normal-sized objects are currently being
// allocated. May be NULL.
HeapRegion* _cur_alloc_region;
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
// It releases the mutator alloc region.
void release_mutator_alloc_region();
// Postcondition: cur_alloc_region == NULL.
void abandon_cur_alloc_region();
void abandon_gc_alloc_regions();
// The to-space memory regions into which objects are being copied during
......@@ -360,27 +374,21 @@ protected:
G1CollectorPolicy* _g1_policy;
// This is the second level of trying to allocate a new region. If
// new_region_work didn't find a region in the free_list, this call
// will check whether there's anything available in the
// secondary_free_list and/or wait for more regions to appear in that
// list, if _free_regions_coming is set.
// new_region() didn't find a region on the free_list, this call will
// check whether there's anything available on the
// secondary_free_list and/or wait for more regions to appear on
// that list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list();
// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request.
HeapRegion* new_region_work(size_t word_size, bool do_expand);
HeapRegion* new_region(size_t word_size, bool do_expand);
// Try to allocate a new region to be used for allocation by a
// mutator thread. Attempt to expand the heap if no region is
// Try to allocate a new region to be used for allocation by
// a GC thread. It will try to expand the heap if no region is
// available.
HeapRegion* new_alloc_region(size_t word_size) {
return new_region_work(word_size, false /* do_expand */);
}
// Try to allocate a new region to be used for allocation by a GC
// thread. Attempt to expand the heap if no region is available.
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
// Attempt to satisfy a humongous allocation request of the given
......@@ -415,10 +423,6 @@ protected:
// * All non-TLAB allocation requests should go to mem_allocate()
// and mem_allocate() should never be called with is_tlab == true.
//
// * If the GC locker is active we currently stall until we can
// allocate a new young region. This will be changed in the
// near future (see CR 6994056).
//
// * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If
// this fails, they will attempt to do an evacuation pause and
......@@ -441,122 +445,38 @@ protected:
bool is_tlab, /* expected to be false */
bool* gc_overhead_limit_was_exceeded);
// The following methods, allocate_from_cur_allocation_region(),
// attempt_allocation(), attempt_allocation_locked(),
// replace_cur_alloc_region_and_allocate(),
// attempt_allocation_slow(), and attempt_allocation_humongous()
// have very awkward pre- and post-conditions with respect to
// locking:
//
// If they are called outside a safepoint they assume the caller
// holds the Heap_lock when it calls them. However, on exit they
// will release the Heap_lock if they return a non-NULL result, but
// keep holding the Heap_lock if they return a NULL result. The
// reason for this is that we need to dirty the cards that span
// allocated blocks on young regions to avoid having to take the
// slow path of the write barrier (for performance reasons we don't
// update RSets for references whose source is a young region, so we
// don't need to look at dirty cards on young regions). But, doing
// this card dirtying while holding the Heap_lock can be a
// scalability bottleneck, especially given that some allocation
// requests might be of non-trivial size (and the larger the region
// size is, the fewer allocations requests will be considered
// humongous, as the humongous size limit is a fraction of the
// region size). So, when one of these calls succeeds in allocating
// a block it does the card dirtying after it releases the Heap_lock
// which is why it will return without holding it.
//
// The above asymmetry is the reason why locking / unlocking is done
// explicitly (i.e., with Heap_lock->lock() and
// Heap_lock->unlock()) instead of using MutexLocker and
// MutexUnlocker objects. The latter would ensure that the lock is
// unlocked / re-locked at every possible exit out of the basic
// block. However, we only want that action to happen in selected
// places.
//
// Further, if the above methods are called during a safepoint, then
// naturally there's no assumption about the Heap_lock being held or
// there's no attempt to unlock it. The parameter at_safepoint
// indicates whether the call is made during a safepoint or not (as
// an optimization, to avoid reading the global flag with
// SafepointSynchronize::is_at_safepoint()).
//
// The methods share these parameters:
//
// * word_size : the size of the allocation request in words
// * at_safepoint : whether the call is done at a safepoint; this
// also determines whether a GC is permitted
// (at_safepoint == false) or not (at_safepoint == true)
// * do_dirtying : whether the method should dirty the allocated
// block before returning
//
// They all return either the address of the block, if they
// successfully manage to allocate it, or NULL.
// It tries to satisfy an allocation request out of the current
// alloc region, which is passed as a parameter. It assumes that the
// caller has checked that the current alloc region is not NULL.
// Given that the caller has to check the current alloc region for
// at least NULL, it might as well pass it as the first parameter so
// that the method doesn't have to read it from the
// _cur_alloc_region field again. It is called from both
// attempt_allocation() and attempt_allocation_locked() and the
// with_heap_lock parameter indicates whether the caller was holding
// the heap lock when it called it or not.
inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size,
bool with_heap_lock);
// First-level of allocation slow path: it attempts to allocate out
// of the current alloc region in a lock-free manner using a CAS. If
// that fails it takes the Heap_lock and calls
// attempt_allocation_locked() for the second-level slow path.
inline HeapWord* attempt_allocation(size_t word_size);
// Second-level of allocation slow path: while holding the Heap_lock
// it tries to allocate out of the current alloc region and, if that
// fails, tries to allocate out of a new current alloc region.
inline HeapWord* attempt_allocation_locked(size_t word_size);
// It assumes that the current alloc region has been retired and
// tries to allocate a new one. If it's successful, it performs the
// allocation out of the new current alloc region and updates
// _cur_alloc_region. Normally, it would try to allocate a new
// region if the young gen is not full, unless can_expand is true in
// which case it would always try to allocate a new region.
HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint,
bool do_dirtying,
bool can_expand);
// Third-level of allocation slow path: when we are unable to
// allocate a new current alloc region to satisfy an allocation
// request (i.e., when attempt_allocation_locked() fails). It will
// try to do an evacuation pause, which might stall due to the GC
// locker, and retry the allocation attempt when appropriate.
HeapWord* attempt_allocation_slow(size_t word_size);
// The method that tries to satisfy a humongous allocation
// request. If it cannot satisfy it it will try to do an evacuation
// pause to perhaps reclaim enough space to be able to satisfy the
// allocation request afterwards.
// The following three methods take a gc_count_before_ret
// parameter which is used to return the GC count if the method
// returns NULL. Given that we are required to read the GC count
// while holding the Heap_lock, and these paths will take the
// Heap_lock at some point, it's easier to get them to read the GC
// count while holding the Heap_lock before they return NULL instead
// of the caller (namely: mem_allocate()) having to also take the
// Heap_lock just to read the GC count.
// First-level mutator allocation attempt: try to allocate out of
// the mutator alloc region without taking the Heap_lock. This
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t word_size,
unsigned int* gc_count_before_ret);
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
unsigned int* gc_count_before_ret);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
HeapWord* attempt_allocation_humongous(size_t word_size,
bool at_safepoint);
unsigned int* gc_count_before_ret);
// It does the common work when we are retiring the current alloc region.
inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
// It retires the current alloc region, which is passed as a
// parameter (since, typically, the caller is already holding on to
// it). It sets _cur_alloc_region to NULL.
void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
// It attempts to do an allocation immediately before or after an
// evacuation pause and can only be called by the VM thread. It has
// slightly different assumptions than the ones before (i.e.,
// assumes that the current alloc region has been retired).
// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_cur_alloc_region);
bool expect_null_mutator_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
......@@ -583,6 +503,12 @@ protected:
// GC pause.
void retire_alloc_region(HeapRegion* alloc_region, bool par);
// These two methods are the "callbacks" from the G1AllocRegion class.
HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
void retire_mutator_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
......@@ -1027,6 +953,9 @@ public:
// The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; }
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
......
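
The gc_count_before_ret convention described in the comments above can be summarized in a few lines: the slow path samples the collection count while it still holds the Heap_lock, so that on failure mem_allocate() can request a GC pause without re-taking the lock just to read the count. A hedged sketch follows; the names, the stubbed try_allocate_locked(), and the std::mutex are stand-ins, not the real API.

```cpp
#include <cstddef>
#include <mutex>

static std::mutex heap_lock;                // stand-in for Heap_lock
static unsigned int total_collections = 0;  // read/written under heap_lock

// Assumed allocation primitive; always fails in this sketch.
void* try_allocate_locked(size_t /*word_size*/) { return nullptr; }

void* attempt_allocation_slow(size_t word_size,
                              unsigned int* gc_count_before_ret) {
  std::lock_guard<std::mutex> guard(heap_lock);
  void* result = try_allocate_locked(word_size);
  if (result == nullptr) {
    // Sample the GC count while still holding the lock so the caller can
    // schedule a pause keyed to this count without re-acquiring the lock.
    *gc_count_before_ret = total_collections;
  }
  return result;
}
```
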
......@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"
......@@ -59,131 +60,23 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
return r != NULL && r->in_collection_set();
}
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size,
bool with_heap_lock) {
assert_not_at_safepoint();
assert(with_heap_lock == Heap_lock->owned_by_self(),
"with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
assert(cur_alloc_region != NULL, "pre-condition of the method");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
"should not be used for humongous allocations");
assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
assert(!cur_alloc_region->is_empty(),
err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
cur_alloc_region->bottom(), cur_alloc_region->end()));
HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
if (result != NULL) {
assert(is_in(result), "result should be in the heap");
if (with_heap_lock) {
Heap_lock->unlock();
}
assert_heap_not_locked();
// Do the dirtying after we release the Heap_lock.
dirty_young_block(result, word_size);
return result;
}
if (with_heap_lock) {
assert_heap_locked();
} else {
assert_heap_not_locked();
}
return NULL;
}
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
G1CollectedHeap::attempt_allocation(size_t word_size,
unsigned int* gc_count_before_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not be called "
"for humongous allocation requests");
HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
word_size,
false /* with_heap_lock */);
assert_heap_not_locked();
if (result != NULL) {
return result;
}
}
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
// Our attempt to allocate lock-free failed as the current
// allocation region is either NULL or full. So, we'll now take the
// Heap_lock and retry.
Heap_lock->lock();
HeapWord* result = attempt_allocation_locked(word_size);
if (result != NULL) {
assert_heap_not_locked();
return result;
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size, gc_count_before_ret);
}
assert_heap_locked();
return NULL;
}
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
"pre-condition of the call");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
// The region is guaranteed to be young
g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
_summary_bytes_used += cur_alloc_region->used();
_cur_alloc_region = NULL;
}
inline HeapWord*
G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation_locked() "
"should not be called for humongous allocation requests");
// First, reread the current alloc region and retry the allocation
// in case somebody replaced it while we were waiting to get the
// Heap_lock.
HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(
cur_alloc_region, word_size,
true /* with_heap_lock */);
if (result != NULL) {
assert_heap_not_locked();
return result;
}
// We failed to allocate out of the current alloc region, so let's
// retire it before getting a new one.
retire_cur_alloc_region(cur_alloc_region);
}
assert_heap_locked();
// Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
false, /* at_safepoint */
true, /* do_dirtying */
false /* can_expand */);
if (result != NULL) {
assert_heap_not_locked();
return result;
dirty_young_block(result, word_size);
}
assert_heap_locked();
return NULL;
return result;
}
// It dirties the cards that cover the block so that the post
......
......@@ -360,6 +360,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_young_type(NotYoung);
reset_pre_dummy_top();
if (!par) {
// If this is parallel, this will be done later.
......@@ -923,11 +924,11 @@ void G1OffsetTableContigSpace::set_saved_mark() {
ContiguousSpace::set_saved_mark();
OrderAccess::storestore();
_gc_time_stamp = curr_gc_time_stamp;
// The following fence is to force a flush of the writes above, but
// is strictly not needed because when an allocating worker thread
// calls set_saved_mark() it does so under the ParGCRareEvent_lock;
// when the lock is released, the write will be flushed.
// OrderAccess::fence();
// No need to do another barrier to flush the writes above. If
// this is called in parallel with other threads trying to
// allocate into the region, the caller should call this while
// holding a lock and when the lock is released the writes will be
// flushed.
}
}
......
......@@ -149,6 +149,13 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
G1BlockOffsetArrayContigSpace _offsets;
Mutex _par_alloc_lock;
volatile unsigned _gc_time_stamp;
// When we need to retire an allocation region, while other threads
// are also concurrently trying to allocate into it, we typically
// allocate a dummy object at the end of the region to ensure that
// no more allocations can take place in it. However, sometimes we
// want to know where the end of the last "real" object we allocated
// into the region was, and that is what this field keeps track of.
HeapWord* _pre_dummy_top;
public:
// Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
......@@ -163,6 +170,17 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
virtual void set_saved_mark();
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
// See the comment above in the declaration of _pre_dummy_top for an
// explanation of what it is.
void set_pre_dummy_top(HeapWord* pre_dummy_top) {
assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
_pre_dummy_top = pre_dummy_top;
}
HeapWord* pre_dummy_top() {
return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
}
void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
virtual void clear(bool mangle_space);
......
......@@ -38,15 +38,8 @@ inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
// this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
MutexLocker x(&_par_alloc_lock);
// This ought to be just "allocate", because of the lock above, but that
// ContiguousSpace::allocate asserts that either the allocating thread
// holds the heap lock or it is the VM thread and we're at a safepoint.
// The best I (dld) could figure was to put a field in ContiguousSpace
// meaning "locking at safepoint taken care of", and set/reset that
// here. But this will do for now, especially in light of the comment
// above. Perhaps in the future some lock-free manner of keeping the
// coordination.
HeapWord* res = ContiguousSpace::par_allocate(size);
// Given that we take the lock no need to use par_allocate() here.
HeapWord* res = ContiguousSpace::allocate(size);
if (res != NULL) {
_offsets.alloc_block(res, size);
}
......
......@@ -382,6 +382,11 @@ public:
return (addr_for(pcard) == p);
}
HeapWord* align_to_card_boundary(HeapWord* p) {
jbyte* pcard = byte_for(p + card_size_in_words - 1);
return addr_for(pcard);
}
// The kinds of precision a CardTableModRefBS may offer.
enum PrecisionStyle {
Precise,
......
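
align_to_card_boundary() above rounds a heap address up to the next card boundary (and leaves it unchanged if it is already aligned) by stepping card_size_in_words - 1 into the address before mapping through the card table and back. The same arithmetic in isolation is shown below on a toy word index; the 64-word card size is an assumption for illustration (512-byte cards, 8-byte words).

```cpp
#include <cstdint>
#include <cstdio>

static const uintptr_t card_size_in_words = 64;  // assumed card size in words

// Round a word index up to the next card boundary; a no-op if already aligned.
uintptr_t align_up_to_card(uintptr_t word_index) {
  // Same effect as byte_for(p + card_size_in_words - 1) followed by
  // addr_for(): step almost a whole card forward, then truncate down.
  return (word_index + card_size_in_words - 1) & ~(card_size_in_words - 1);
}

int main() {
  std::printf("%lu\n", (unsigned long)align_up_to_card(0));   // 0
  std::printf("%lu\n", (unsigned long)align_up_to_card(1));   // 64
  std::printf("%lu\n", (unsigned long)align_up_to_card(64));  // 64
  std::printf("%lu\n", (unsigned long)align_up_to_card(65));  // 128
  return 0;
}
```
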
......@@ -818,9 +818,14 @@ size_t ContiguousSpace::block_size(const HeapWord* p) const {
// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
HeapWord* const end_value) {
// In G1 there are places where a GC worker can allocate into a
// region using this serial allocation code without being prone to a
// race with other GC workers (we ensure that no other GC worker can
// access the same region at the same time). So the assert below is
// too strong in the case of G1.
assert(Heap_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() &&
Thread::current()->is_VM_thread()),
(Thread::current()->is_VM_thread() || UseG1GC)),
"not locked");
HeapWord* obj = top();
if (pointer_delta(end_value, obj) >= size) {
......