Commit ece07404 authored by sjohanss

8057536: Refactor G1 to allow context specific allocations

Summary: Splitting out a g1 allocator class to simplify specialized allocators which can associate each allocation with a given context.
Reviewed-by: mgerdin, brutisso
Parent 89037d56
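The core of the change: allocation-region bookkeeping moves out of G1CollectedHeap into a new G1Allocator hierarchy, so each allocation can be tagged with an AllocationContext_t. As orientation, here is a minimal, self-contained C++ sketch of that shape; the names below are invented stand-ins, not code from the patch.

#include <cstdint>

typedef uint8_t AllocationContext_t;             // mirrors the patch's typedef

struct Region { AllocationContext_t context; };  // stand-in for HeapRegion

// Base interface: every region lookup is keyed by an allocation context.
class Allocator {
public:
  virtual ~Allocator() {}
  virtual Region* mutator_alloc_region(AllocationContext_t context) = 0;
};

// Default implementation: ignores the context and serves one shared
// region, which is what G1DefaultAllocator does in this patch.
class DefaultAllocator : public Allocator {
  Region _region;
public:
  DefaultAllocator() { _region.context = 0; }
  virtual Region* mutator_alloc_region(AllocationContext_t /*context*/) {
    return &_region;
  }
};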
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
public class G1Allocator extends VMObject {
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1Allocator");
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
}
public long getSummaryBytes() {
return summaryBytesUsedField.getValue(addr);
}
public G1Allocator(Address addr) {
super(addr);
}
}
......@@ -36,7 +36,6 @@ import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
......@@ -47,8 +46,8 @@ public class G1CollectedHeap extends SharedHeap {
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1Allocator* _allocator
static private AddressField g1Allocator;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// HeapRegionSet _old_set;
......@@ -68,7 +67,7 @@ public class G1CollectedHeap extends SharedHeap {
Type type = db.lookupType("G1CollectedHeap");
hrmFieldOffset = type.getField("_hrm").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1Allocator = type.getAddressField("_allocator");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
......@@ -79,7 +78,7 @@ public class G1CollectedHeap extends SharedHeap {
}
public long used() {
return summaryBytesUsedField.getValue(addr);
return allocator().getSummaryBytes();
}
public long n_regions() {
......@@ -97,6 +96,11 @@ public class G1CollectedHeap extends SharedHeap {
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}
public G1Allocator allocator() {
Address g1AllocatorAddr = g1Allocator.getValue(addr);
return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
}
public HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
......
......@@ -129,8 +129,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
// Note that we first perform the allocation and then we store the
// region in _alloc_region. This is the reason why an active region
// can never be empty.
_alloc_region = new_alloc_region;
_count += 1;
update_alloc_region(new_alloc_region);
trace("region allocation successful");
return result;
} else {
......@@ -172,6 +171,19 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
trace("set");
}
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
trace("update");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
assert(alloc_region != NULL && !alloc_region->is_empty(),
ar_ext_msg(this, "pre-condition"));
_alloc_region = alloc_region;
_alloc_region->set_allocation_context(allocation_context());
_count += 1;
trace("updated");
}
HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
......@@ -225,5 +237,70 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
_alloc_region(NULL), _count(0), _used_bytes_before(0) { }
_alloc_region(NULL), _count(0), _used_bytes_before(0),
_allocation_context(AllocationContext::system()) { }
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
bool force) {
return _g1h->new_mutator_alloc_region(word_size, force);
}
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
}
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForSurvived);
}
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
}
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForTenured);
}
HeapRegion* OldGCAllocRegion::release() {
HeapRegion* cur = get();
if (cur != NULL) {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top();
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
if (to_allocate_words != 0) {
// We are not at a card boundary. Fill up, possibly into the next, taking the
// end of the region and the minimum object size into account.
to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
// Skip allocation if there is not enough space to allocate even the smallest
// possible object. In this case this region will not be retained, so the
// original problem cannot occur.
if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
CollectedHeap::fill_with_object(dummy, to_allocate_words);
}
}
}
return G1AllocRegion::release();
}
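The arithmetic above pads the retained region's top out to the next card boundary before the region is released. A self-contained worked example, assuming 512-byte cards, 8-byte HeapWords, and a 2-word minimum fill size (illustrative values, not taken from this patch):

#include <cassert>
#include <cstddef>

int main() {
  // Assumed constants: 512-byte cards, 8-byte HeapWords, 2-word min fill.
  const size_t card_words     = 512 / 8;   // 64 words per card
  const size_t min_fill_words = 2;

  // Say the retained region's top sits 1000 words into the region and
  // the region ends at word 4096.
  size_t top      = 1000;
  size_t end      = 4096;
  size_t aligned  = ((top + card_words - 1) / card_words) * card_words; // 1024
  size_t to_alloc = aligned - top;                                      // 24

  // Same clamping as MIN2(remaining, MAX2(to_alloc, min_fill)) above.
  size_t fill = to_alloc > min_fill_words ? to_alloc : min_fill_words;
  size_t remaining = end - top;
  if (fill > remaining) fill = remaining;

  // 24 words >= min fill, so a 24-word dummy object pads top to the
  // card boundary and the region can safely be retained.
  assert(fill == 24 && fill >= min_fill_words);
  return 0;
}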
......@@ -57,6 +57,9 @@ private:
// correct use of init() and release()).
HeapRegion* volatile _alloc_region;
// Allocation context associated with this alloc region.
AllocationContext_t _allocation_context;
// It keeps track of the distinct number of regions that are used
// for allocation in the active interval of this object, i.e.,
// between a call to init() and a call to release(). The count
......@@ -110,6 +113,10 @@ private:
// else can allocate out of it.
void retire(bool fill_up);
// After a region is allocated by alloc_new_region, this
// method is used to set it as the active alloc_region
void update_alloc_region(HeapRegion* alloc_region);
// Allocate a new active region and use it to perform a word_size
// allocation. The force parameter will be passed on to
// G1CollectedHeap::allocate_new_alloc_region() and tells it to try
......@@ -137,6 +144,9 @@ public:
return (hr == _dummy_region) ? NULL : hr;
}
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
AllocationContext_t allocation_context() { return _allocation_context; }
uint count() { return _count; }
// The following two are the building blocks for the allocation method.
......@@ -182,6 +192,40 @@ public:
#endif // G1_ALLOC_REGION_TRACING
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
MutatorAllocRegion()
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
SurvivorGCAllocRegion()
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
class OldGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
// This specialization of release() makes sure that the last card that has
// been allocated into has been completely filled by a dummy object. This
// avoids races when remembered set scanning wants to update the BOT of the
// last card in the retained old gc alloc region, and allocation threads
// allocating into that card at the same time.
virtual HeapRegion* release();
};
class ar_ext_msg : public err_msg {
public:
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#include "memory/allocation.hpp"
typedef unsigned char AllocationContext_t;
class AllocationContext : AllStatic {
public:
// Currently used context
static AllocationContext_t current() {
return 0;
}
// System wide default context
static AllocationContext_t system() {
return 0;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
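Both hooks return 0 for now, so every allocation lands in the single system context; the typedef only becomes meaningful once a specialized allocator hands out distinct values. A hypothetical sketch of how a per-region context tag could be consulted (the filter below is invented for illustration):

#include <cassert>
#include <cstdint>

typedef uint8_t AllocationContext_t;   // same one-byte width as above

struct Region { AllocationContext_t context; };

// Hypothetical filter: a context-aware allocator could refuse to hand
// out regions that are tagged with a different context.
static bool usable_for(const Region& r, AllocationContext_t ctx) {
  return r.context == ctx;
}

int main() {
  Region r = { 0 };            // tagged with AllocationContext::system()
  assert(usable_for(r, 0));    // the default context matches
  assert(!usable_for(r, 1));   // a distinct context would be rejected
  return 0;
}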
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
void G1DefaultAllocator::init_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
_mutator_alloc_region.init();
}
void G1DefaultAllocator::release_mutator_alloc_region() {
_mutator_alloc_region.release();
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
// c) it's empty (this means that it was emptied during
// a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
// has been subsequently used to allocate a humongous
// object that may be less than the region size).
if (retained_region != NULL &&
!retained_region->in_collection_set() &&
!(retained_region->top() == retained_region->end()) &&
!retained_region->is_empty() &&
!retained_region->isHumongous()) {
retained_region->record_top_and_timestamp();
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_g1h->_old_set.remove(retained_region);
bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
evacuation_info.set_alloc_regions_used_before(retained_region->used());
}
}
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);
_survivor_gc_alloc_region.init();
_old_gc_alloc_region.init();
reuse_retained_old_region(evacuation_info,
&_old_gc_alloc_region,
&_retained_old_gc_alloc_region);
}
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
AllocationContext_t context = AllocationContext::current();
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
old_gc_alloc_region(context)->count());
survivor_gc_alloc_region(context)->release();
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
// _retained_old_gc_alloc_region will become NULL. This is what we
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
_g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
}
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
_retained_old_gc_alloc_region = NULL;
}
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
}
return obj;
}
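The first branch of allocate_slow() refills the PLAB only when the requested object is small relative to the buffer, so that discarding the buffer's remainder stays below ParallelGCBufferWastePct of its size. A worked example, assuming a 1024-word PLAB and the (assumed) HotSpot default waste percentage of 10:

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed values: a 1024-word PLAB and ParallelGCBufferWastePct = 10;
  // neither number comes from this patch.
  const size_t gclab_word_size = 1024;
  const size_t waste_pct       = 10;

  // 50 * 100 = 5000 < 10240: retiring the PLAB wastes little, so refill.
  size_t small_obj = 50;
  // 200 * 100 = 20000 >= 10240: too wasteful, allocate directly instead.
  size_t large_obj = 200;

  printf("small: %s\n",
         small_obj * 100 < gclab_word_size * waste_pct ? "refill PLAB" : "direct");
  printf("large: %s\n",
         large_obj * 100 < gclab_word_size * waste_pct ? "refill PLAB" : "direct");
  return 0;
}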
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
G1CollectedHeap* _g1h;
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
public:
G1Allocator(G1CollectedHeap* heap) :
_g1h(heap), _summary_bytes_used(0) { }
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
size_t used_unlocked() const {
return _summary_bytes_used;
}
void increase_used(size_t bytes) {
_summary_bytes_used += bytes;
}
void decrease_used(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
_summary_bytes_used, bytes));
_summary_bytes_used -= bytes;
}
void set_used(size_t bytes) {
_summary_bytes_used = bytes;
}
};
// The default allocator for G1.
class G1DefaultAllocator : public G1Allocator {
protected:
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
HeapRegion* _retained_old_gc_alloc_region;
public:
G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
virtual void init_mutator_alloc_region();
virtual void release_mutator_alloc_region();
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
virtual bool is_retained_old_region(HeapRegion* hr) {
return _retained_old_gc_alloc_region == hr;
}
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
return &_mutator_alloc_region;
}
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
return &_survivor_gc_alloc_region;
}
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
return &_old_gc_alloc_region;
}
virtual size_t used() {
assert(Heap_lock->owner() != NULL,
"Should be owned on this thread's behalf.");
size_t result = _summary_bytes_used;
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
if (hr != NULL) {
result += hr->used();
}
return result;
}
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
virtual ~G1ParGCAllocBuffer() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
virtual void retire(bool end_of_gc, bool retain) {
if (_retired) {
return;
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
};
class G1ParGCAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
G1CollectedHeap* _g1h;
size_t _alloc_buffer_waste;
size_t _undo_waste;
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
virtual void retire_alloc_buffers() = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
}
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() { return _undo_waste; }
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
} else {
obj = alloc_buffer(purpose, context)->allocate(word_sz);
}
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz, context);
}
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(purpose, context)->contains(obj)) {
assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
};
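undo_allocation() above can only hand memory back when the object sits in the current buffer; otherwise the space is converted to a filler object and booked as undo waste. A hypothetical, self-contained illustration of that policy (the Plab type and its layout are invented for this sketch):

#include <cassert>
#include <cstddef>

struct Plab {
  char buf[4096];
  size_t top = 0;
  char* allocate(size_t n) { char* p = buf + top; top += n; return p; }
  bool contains(char* p) { return p >= buf && p < buf + top; }
  void undo(char* p, size_t n) {
    if (p + n == buf + top) top -= n;   // only the last allocation pops
  }
};

int main() {
  Plab plab;
  size_t undo_waste = 0;
  char* obj = plab.allocate(64);
  if (plab.contains(obj)) {
    plab.undo(obj, 64);                 // in-buffer: roll the bump pointer back
  } else {
    undo_waste += 64;                   // out of buffer: would become filler
  }
  assert(plab.top == 0 && undo_waste == 0);
  return 0;
}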
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
return _alloc_buffers[purpose];
}
virtual void retire_alloc_buffers();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultAllocator(g1h);
}
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultParGCAllocator(g1h);
}
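These two factories are the only places that name a concrete allocator; a context-aware build can substitute its own implementation by recompiling just these functions, while callers keep using the abstract interface. A minimal sketch of that pattern, with Heap standing in for G1CollectedHeap and all names invented:

#include <cstdint>

class Heap;  // stand-in for G1CollectedHeap

class Allocator {
public:
  explicit Allocator(Heap* h) : _heap(h) {}
  virtual ~Allocator() {}
  static Allocator* create_allocator(Heap* h);
protected:
  Heap* _heap;
};

class DefaultAllocator : public Allocator {
public:
  explicit DefaultAllocator(Heap* h) : Allocator(h) {}
};

// A specialized build recompiles only this one function to switch
// implementations; everything else depends only on Allocator.
Allocator* Allocator::create_allocator(Heap* h) {
  return new DefaultAllocator(h);
}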
......@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
......@@ -80,12 +82,6 @@ typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};
class YoungList : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;
......@@ -158,40 +154,6 @@ public:
void print();
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
MutatorAllocRegion()
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
SurvivorGCAllocRegion()
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
class OldGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
// This specialization of release() makes sure that the last card that has been
// allocated into has been completely filled by a dummy object.
// This avoids races when remembered set scanning wants to update the BOT of the
// last card in the retained old gc alloc region, and allocation threads
// allocating into that card at the same time.
virtual HeapRegion* release();
};
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
......@@ -222,6 +184,9 @@ class G1CollectedHeap : public SharedHeap {
friend class MutatorAllocRegion;
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
friend class G1Allocator;
friend class G1DefaultAllocator;
friend class G1ResManAllocator;
// Closures used in implementation.
template <G1Barrier barrier, G1Mark do_mark_object>
......@@ -232,6 +197,8 @@ class G1CollectedHeap : public SharedHeap {
friend class G1ParScanClosureSuper;
friend class G1ParEvacuateFollowersClosure;
friend class G1ParTask;
friend class G1ParGCAllocator;
friend class G1DefaultParGCAllocator;
friend class G1FreeGarbageRegionClosure;
friend class RefineCardTableEntryClosure;
friend class G1PrepareCompactClosure;
......@@ -293,44 +260,15 @@ private:
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// Class that handles the different kinds of allocations.
G1Allocator* _allocator;
// PLAB sizing policy for survivors.
PLABStats _survivor_plab_stats;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
// PLAB sizing policy for tenured objects.
PLABStats _old_plab_stats;
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// The last old region we allocated to during the last GC.
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
// It specifies whether we should attempt to expand the heap after a
// region allocation failure. If heap expansion fails we set this to
// false so that we don't re-attempt the heap expansion (it's likely
......@@ -348,9 +286,6 @@ private:
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// Set up the retained old gc alloc region as the current old gc alloc region.
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
......@@ -361,13 +296,6 @@ private:
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
// Determines PLAB size for a particular allocation purpose.
size_t desired_plab_sz(GCAllocPurpose purpose);
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
// Records whether the region at the given index is kept live by roots or
// references from the young generation.
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
......@@ -525,11 +453,12 @@ protected:
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
size_t word_size);
size_t word_size,
AllocationContext_t context);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
HeapWord* humongous_obj_allocate(size_t word_size);
HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
// The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime
......@@ -585,6 +514,7 @@ protected:
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret);
......@@ -599,7 +529,8 @@ protected:
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_mutator_alloc_region);
AllocationContext_t context,
bool expect_null_mutator_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
......@@ -611,7 +542,9 @@ protected:
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
size_t word_size,
AllocationContext_t context);
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
HeapRegion* alloc_region,
......@@ -623,10 +556,12 @@ protected:
void par_allocate_remaining_space(HeapRegion* r);
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size);
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size);
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
// These methods are the "callbacks" from the G1AllocRegion class.
......@@ -665,13 +600,15 @@ protected:
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
HeapWord* satisfy_failed_allocation(size_t word_size,
AllocationContext_t context,
bool* succeeded);
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
HeapWord* expand_and_allocate(size_t word_size);
HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
// Process any reference objects discovered during
// an incremental evacuation pause.
......@@ -694,6 +631,27 @@ public:
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
// Returns the PLAB statistics given a purpose.
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// Determines PLAB size for a particular allocation purpose.
size_t desired_plab_sz(GCAllocPurpose purpose);
// Do anything common to GC's.
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
......@@ -1271,7 +1229,7 @@ public:
// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(HeapRegion* hr) {
return hr == _retained_old_gc_alloc_region;
return _allocator->is_retained_old_region(hr);
}
// Perform a collection of the heap; intended for use in implementing
......@@ -1752,28 +1710,4 @@ protected:
size_t _max_heap_capacity;
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
virtual ~G1ParGCAllocBuffer() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
virtual void retire(bool end_of_gc, bool retain) {
if (_retired) {
return;
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
......@@ -98,10 +98,12 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
AllocationContext_t context = AllocationContext::current();
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
......@@ -112,17 +114,17 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
word_size) {
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
false /* bot_updates */);
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
......@@ -130,16 +132,17 @@ inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
true /* bot_updates */);
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
true /* bot_updates */);
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
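Both GC allocation paths above use the same two-step pattern: an unlocked attempt on the active region first, then a retry under FreeList_lock that may install a fresh region. A self-contained sketch of that pattern, using std::mutex in place of the VM's FreeList_lock and stand-in types throughout:

#include <cstddef>
#include <mutex>

struct AllocRegion {
  char* cur;
  char* end;
  // Lock-free fast path (a real implementation would CAS-bump `cur`).
  void* attempt_allocation(size_t bytes) {
    if (cur + bytes <= end) { void* p = cur; cur += bytes; return p; }
    return nullptr;
  }
  // Slow path, caller holds the lock; a new region could be swapped in here.
  void* attempt_allocation_locked(size_t bytes) {
    return attempt_allocation(bytes);
  }
};

std::mutex free_list_lock;  // plays the role of FreeList_lock

void* old_attempt_allocation(AllocRegion* region, size_t bytes) {
  void* result = region->attempt_allocation(bytes);
  if (result == nullptr) {
    std::lock_guard<std::mutex> guard(free_list_lock);
    result = region->attempt_allocation_locked(bytes);
  }
  return result;
}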
......
......@@ -38,11 +38,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0),
_alloc_buffer_waste(0), _undo_waste(0) {
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
// we allocate G1YoungSurvRateNumRegions plus one entries, since
// we "sacrifice" entry 0 to keep track of surviving bytes for
......@@ -60,14 +57,14 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_start = os::elapsedTime();
}
G1ParScanThreadState::~G1ParScanThreadState() {
retire_alloc_buffers();
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
......@@ -90,14 +87,16 @@ G1ParScanThreadState::print_termination_stats(int i,
const double elapsed_ms = elapsed_time() * 1000.0;
const double s_roots_ms = strong_roots_time() * 1000.0;
const double term_ms = term_time() * 1000.0;
const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
const size_t undo_waste = _g1_par_allocator->undo_waste();
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
(alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
alloc_buffer_waste() * HeapWordSize / K,
undo_waste() * HeapWordSize / K);
(alloc_buffer_waste + undo_waste) * HeapWordSize / K,
alloc_buffer_waste * HeapWordSize / K,
undo_waste * HeapWordSize / K);
}
#ifdef ASSERT
......@@ -164,12 +163,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
: m->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
AllocationContext_t context = from_region->allocation_context();
HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
if (obj_ptr != NULL) {
undo_allocation(alloc_purpose, obj_ptr, word_sz);
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj_ptr = NULL;
}
}
......@@ -246,66 +246,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
obj->oop_iterate_backwards(&_scanner);
}
} else {
undo_allocation(alloc_purpose, obj_ptr, word_sz);
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj = forward_ptr;
}
return obj;
}
HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
}
return obj;
}
void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
if (alloc_buffer(purpose)->contains(obj)) {
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
} else {
obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
}
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz);
}
void G1ParScanThreadState::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
......@@ -46,9 +46,8 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
G1ParGCAllocator* _g1_par_allocator;
ageTable _age_table;
G1ParScanClosure _scanner;
......@@ -78,7 +77,6 @@ class G1ParScanThreadState : public StackObj {
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
......@@ -104,13 +102,6 @@ class G1ParScanThreadState : public StackObj {
ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return _alloc_buffers[purpose];
}
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
size_t undo_waste() const { return _undo_waste; }
#ifdef ASSERT
bool queue_is_empty() const { return _refs->is_empty(); }
......@@ -126,12 +117,6 @@ class G1ParScanThreadState : public StackObj {
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
private:
inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
......@@ -177,8 +162,6 @@ class G1ParScanThreadState : public StackObj {
}
private:
void retire_alloc_buffers();
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
......
......@@ -219,6 +219,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
_in_collection_set = false;
set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_young_type(NotYoung);
......@@ -346,9 +347,9 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
MemRegion mr, AllocationContext_t context) :
G1OffsetTableContigSpace(sharedOffsetArray, mr),
_hrm_index(hrm_index),
_hrm_index(hrm_index), _allocation_context(context),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
......@@ -715,6 +716,8 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
st->print("AC%4u", allocation_context());
if (isHumongous()) {
if (startsHumongous())
st->print(" HS");
......
......@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
......@@ -236,6 +237,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
// The index of this region in the heap region sequence.
uint _hrm_index;
AllocationContext_t _allocation_context;
HumongousType _humongous_type;
// For a humongous region, region in which it starts.
HeapRegion* _humongous_start_region;
......@@ -332,7 +335,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
public:
HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);
MemRegion mr,
AllocationContext_t context = AllocationContext::system());
// Initializing the HeapRegion not only resets the data structure, but also
// resets the BOT for that heap region.
......@@ -527,6 +531,14 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;
}
AllocationContext_t allocation_context() const {
return _allocation_context;
}
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next and prev fields used to link regions into
......
......@@ -45,11 +45,13 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \
\
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \
\
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
......@@ -72,14 +74,16 @@
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
declare_toplevel_type(G1Allocator) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
declare_toplevel_type(G1Allocator*) \
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
......@@ -45,7 +45,8 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
void VM_G1CollectForAllocation::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, _gc_cause);
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
_result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
assert(_result == NULL || _pause_succeeded,
"if we get back a result, the pause should have succeeded");
}
......@@ -99,7 +100,7 @@ void VM_G1IncCollectionPause::doit() {
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
......@@ -152,7 +153,7 @@ void VM_G1IncCollectionPause::doit() {
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
......
......@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
// VM_operations for the G1 collector.
......@@ -40,6 +41,7 @@ protected:
size_t _word_size;
HeapWord* _result;
bool _pause_succeeded;
AllocationContext_t _allocation_context;
public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
......@@ -49,6 +51,8 @@ public:
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; }
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
AllocationContext_t allocation_context() { return _allocation_context; }
};
class VM_G1CollectFull: public VM_GC_Operation {
......
......@@ -68,6 +68,7 @@
template(G1CollectFull) \
template(G1CollectForAllocation) \
template(G1IncCollectionPause) \
template(DestroyAllocationContext) \
template(EnableBiasedLocking) \
template(RevokeBias) \
template(BulkRevokeBias) \
......