Commit f9a4e271 authored by M mgerdin

8047818: G1 HeapRegions can no longer be ContiguousSpaces

Summary: Change parent of G1OffsetTableContigSpace to CompactibleSpace, reimplement missing functionality
Reviewed-by: stefank, jmasa, tschatzl
Parent 7d109a1a
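For orientation before the hunks below: the patch re-parents G1's space classes so that HeapRegion no longer inherits ContiguousSpace's allocation state; instead G1OffsetTableContigSpace carries its own `_top`. A minimal C++ sketch of the resulting shape (stand-in types, not the actual HotSpot declarations):

```cpp
// Sketch only: HeapWord and the Space classes are simplified stand-ins for
// the real HotSpot types in src/share/vm/memory/space.hpp.
typedef unsigned long HeapWord;

class Space { };
class CompactibleSpace : public Space { };  // compaction support, no _top

// Before: G1OffsetTableContigSpace derived from ContiguousSpace (which owns _top).
// After:  it derives from CompactibleSpace and manages _top itself.
class G1OffsetTableContigSpace : public CompactibleSpace {
  HeapWord* _top;                           // allocation high-water mark
 public:
  HeapWord* top() const { return _top; }
  void set_top(HeapWord* value) { _top = value; }
};

class HeapRegion : public G1OffsetTableContigSpace { };
```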
@@ -24,23 +24,26 @@
 package sun.jvm.hotspot.gc_implementation.g1;

+import java.util.ArrayList;
+import java.util.List;
 import java.util.Observable;
 import java.util.Observer;

 import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.memory.CompactibleSpace;
+import sun.jvm.hotspot.memory.MemRegion;
 import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
 import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;

 // Mirror class for HeapRegion. Currently we don't actually include
-// any of its fields but only iterate over it (which we get "for free"
-// as HeapRegion ultimately inherits from ContiguousSpace).
+// any of its fields but only iterate over it.

-public class HeapRegion extends ContiguousSpace {
+public class HeapRegion extends CompactibleSpace {
   // static int GrainBytes;
   static private CIntegerField grainBytesField;
+  static private AddressField topField;

   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,8 @@ public class HeapRegion extends ContiguousSpace {
     Type type = db.lookupType("HeapRegion");
     grainBytesField = type.getCIntegerField("GrainBytes");
+    topField = type.getAddressField("_top");
+
   }

   static public long grainBytes() {
@@ -63,4 +68,25 @@ public class HeapRegion extends ContiguousSpace {
   public HeapRegion(Address addr) {
     super(addr);
   }
+
+  public Address top() {
+    return topField.getValue(addr);
+  }
+
+  @Override
+  public List getLiveRegions() {
+    List res = new ArrayList();
+    res.add(new MemRegion(bottom(), top()));
+    return res;
+  }
+
+  @Override
+  public long used() {
+    return top().minus(bottom());
+  }
+
+  @Override
+  public long free() {
+    return end().minus(top());
+  }
 }
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP

 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"

 inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
                                          size_t word_size,
......
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "memory/space.hpp"

 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
......
@@ -30,6 +30,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/space.inline.hpp"
@@ -60,7 +61,7 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                                HeapRegion* hr,
                                HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
@@ -71,7 +72,7 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
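The switch from `cur_oop->size()` to `hr->block_size(cur)` here and in the hunks below matters because a block start is no longer guaranteed to be an object: `block_size()` also describes the unallocated tail at `top()`. A self-contained toy of that contract, using word indices instead of real `HeapWord*` values:

```cpp
#include <cassert>
#include <cstddef>

// Toy model of HeapRegion::block_size() as defined later in this patch
// (heapRegion.inline.hpp): below top() it returns the object's size, at
// top() it returns the remaining unallocated words. Object sizes are
// faked as 2 words here; the real code asks oop(p)->size().
struct ToyRegion {
  size_t bottom_, top_, end_;
  size_t block_size(size_t p) const {
    if (p < top_) return 2;              // stand-in for oop(p)->size()
    assert(p == top_);                   // only valid block starts allowed
    return end_ - p;                     // pointer_delta(end(), addr)
  }
};

int main() {
  ToyRegion r = {0, 6, 10};
  size_t p = r.bottom_;
  while (p < r.end_) p += r.block_size(p); // walker crosses the free tail too
  assert(p == r.end_);
  return 0;
}
```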
@@ -81,7 +82,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                       HeapWord* bottom,
                                       HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;

   FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
@@ -101,7 +102,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
-    oop_size = oop(bottom)->size();
+    oop_size = _hr->block_size(bottom);
   }

   bottom += oop_size;
@@ -452,7 +453,7 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
     if (cl->abort()) return cur;
     // The check above must occur before the operation below, since an
     // abort might invalidate the "size" operation.
-    cur += obj->size();
+    cur += block_size(cur);
   }
   return NULL;
 }
@@ -524,7 +525,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }

   // If we finish the above loop...We have a parseable object that
@@ -532,10 +533,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
   // inside or spans the entire region.
   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");

   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
@@ -549,7 +549,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
     };

     // Otherwise:
-    next = (cur + obj->size());
+    next = cur + block_size(cur);

     if (!g1h->is_obj_dead(obj)) {
       if (next < end || !obj->is_objArray()) {
@@ -904,7 +904,7 @@ void HeapRegion::verify(VerifyOption vo,
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;

     if (is_humongous != g1->isHumongous(obj_size)) {
@@ -1040,7 +1040,9 @@ void HeapRegion::verify() const {
 // away eventually.

 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
+  set_top(bottom());
+  set_saved_mark_word(bottom());
+  CompactibleSpace::clear(mangle_space);
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
@@ -1078,7 +1080,7 @@ HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
   if (_gc_time_stamp < g1h->get_gc_time_stamp())
     return top();
   else
-    return ContiguousSpace::saved_mark_word();
+    return Space::saved_mark_word();
 }

 void G1OffsetTableContigSpace::record_top_and_timestamp() {
@@ -1093,7 +1095,7 @@ void G1OffsetTableContigSpace::record_top_and_timestamp() {
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
     // of the region. Either way, the behaviour will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;

     // No need to do another barrier to flush the writes above. If
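The comment in this hunk is the heart of the time-stamp protocol: the saved mark must become visible before the new time stamp, or a concurrent reader could pair a fresh stamp with a stale mark. A hedged C++11 analogue of that ordering (HotSpot uses `OrderAccess::storestore()`; the names here are illustrative, not HotSpot's):

```cpp
#include <atomic>

struct ToySpace {
  std::atomic<long>     saved_top{0};
  std::atomic<unsigned> gc_time_stamp{0};

  // Mirrors record_top_and_timestamp(): publish the mark, fence, then stamp.
  void record(long top, unsigned curr_stamp) {
    saved_top.store(top, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_release); // storestore()
    gc_time_stamp.store(curr_stamp, std::memory_order_relaxed);
  }

  // Mirrors saved_mark_word() in the hunk above: a stale stamp means the
  // saved mark is from an older GC, so the caller falls back to top().
  long saved_mark(unsigned heap_stamp, long current_top) {
    if (gc_time_stamp.load(std::memory_order_acquire) < heap_stamp) {
      return current_top;
    }
    return saved_top.load(std::memory_order_relaxed);
  }
};
```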
@@ -1104,6 +1106,26 @@ void G1OffsetTableContigSpace::record_top_and_timestamp() {
   }
 }

+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  while (p < top()) {
+    if (block_is_obj(p)) {
+      blk->do_object(oop(p));
+    }
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj
+
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
@@ -1113,7 +1135,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 {
   _offsets.set_space(this);
   // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  _top = bottom();
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
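The `prepare_for_compaction()` definition in the previous hunk hands full-GC compaction to CompactibleSpace's `SCAN_AND_FORWARD` machinery: every block is an object (`block_is_always_obj`), marked ones are forwarded to the running compaction point, and `reset_after_compaction()` (declared later in heapRegion.hpp) then sets `top` to `compaction_top()`. A greatly simplified, self-contained model of that forwarding pass (assumed semantics of the macro, not its real expansion):

```cpp
#include <cstddef>
#include <vector>

// Toy compaction pass over a space modeled as a list of blocks in word
// units. Assumed shape of SCAN_AND_FORWARD: walk blocks from bottom to the
// scan limit (top), treat each one as an object (block_is_always_obj), and
// forward the GC-marked ones to the running compaction point.
struct ToyBlock { size_t size; bool gc_marked; size_t forwardee; };

size_t forward_all(std::vector<ToyBlock>& blocks, size_t bottom) {
  size_t compact_top = bottom;
  for (ToyBlock& b : blocks) {
    if (b.gc_marked) {
      b.forwardee = compact_top;   // install forwarding address
      compact_top += b.size;
    }                              // dead blocks are stepped over
  }
  return compact_top;              // reset_after_compaction(): set_top(this)
}
```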
@@ -46,8 +46,6 @@
 // The solution is to remove this method from the definition
 // of a Space.

-class CompactibleSpace;
-class ContiguousSpace;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -125,9 +123,9 @@ public:
 // the regions anyway) and at the end of a Full GC. The current scheme
 // that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-class G1OffsetTableContigSpace: public ContiguousSpace {
+class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
+  HeapWord* _top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -144,6 +142,27 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                            MemRegion mr);
+
+  void set_top(HeapWord* value) { _top = value; }
+  HeapWord* top() const { return _top; }
+
+ protected:
+  HeapWord** top_addr() { return &_top; }
+  // Allocation helpers (return NULL if full).
+  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+
+ public:
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+  bool is_free_block(const HeapWord* p) const { return p >= top(); }
+  MemRegion used_region() const { return MemRegion(bottom(), top()); }
+
+  void object_iterate(ObjectClosure* blk);
+  void safe_object_iterate(ObjectClosure* blk);
+
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
@@ -168,6 +187,8 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
+
+  void prepare_for_compaction(CompactPoint* cp);

   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -349,14 +370,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
     ParMarkRootClaimValue      = 9
   };

-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::par_allocate(word_size);
-  }
-  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::allocate(word_size);
-  }
+  // All allocated blocks are occupied by objects in a HeapRegion
+  bool block_is_obj(const HeapWord* p) const;
+
+  // Returns the object size for all valid block starts
+  // and the amount of unallocated words if called on top()
+  size_t block_size(const HeapWord* p) const;
+
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);

   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
......
@@ -26,9 +26,48 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+// This version requires locking.
+inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
+                                                         HeapWord* const end_value) {
+  HeapWord* obj = top();
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    return obj;
+  } else {
+    return NULL;
+  }
+}
+
+// This version is lock-free.
+inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
+                                                             HeapWord* const end_value) {
+  do {
+    HeapWord* obj = top();
+    if (pointer_delta(end_value, obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      // result can be one of two:
+      //  the old top value: the exchange succeeded
+      //  otherwise: the new value of the top is returned.
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+    } else {
+      return NULL;
+    }
+  } while (true);
+}
+
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_impl(size, end());
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
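`par_allocate_impl()` above is a classic lock-free bump-pointer loop: read `top`, CAS in the new top, retry if another thread won the race. A portable C++11 equivalent (toy types; the patch itself uses `Atomic::cmpxchg_ptr` on the raw `_top` field):

```cpp
#include <atomic>
#include <cstddef>

// Allocates 'size' words from a bump pointer without a lock; returns
// false when the space is exhausted, like par_allocate_impl() returning NULL.
struct ToyBumpSpace {
  std::atomic<size_t> top{0};
  size_t end;
  explicit ToyBumpSpace(size_t end_words) : end(end_words) {}

  bool par_allocate(size_t size, size_t* obj_start) {
    size_t obj = top.load(std::memory_order_relaxed);
    do {
      if (end - obj < size) return false;  // pointer_delta(end, obj) < size
      // On failure, compare_exchange_weak reloads 'obj', which plays the
      // role of re-reading top() on the next iteration of the do-while.
    } while (!top.compare_exchange_weak(obj, obj + size));
    *obj_start = obj;                      // the old top is the new block
    return true;
  }
};
```

The serial `allocate_impl()` is the same bump without the CAS, which is why `par_allocate()` in the next hunk can simply take `_par_alloc_lock` and call `allocate()`.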
@@ -40,12 +79,7 @@ inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }

 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -57,6 +91,32 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
   return _offsets.block_start_const(p);
 }

+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  return p < top();
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  const HeapWord* current_top = top();
+  if (addr < current_top) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == current_top, "just checking");
+    return pointer_delta(end(), addr);
+  }
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return par_allocate_impl(word_size, end());
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_impl(word_size, end());
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
......
@@ -34,6 +34,8 @@
   static_field(HeapRegion, GrainBytes, size_t) \
   static_field(HeapRegion, LogOfHRGrainBytes, int) \
 \
+  nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord*) \
+\
   nonstatic_field(G1HeapRegionTable, _base, address) \
   nonstatic_field(G1HeapRegionTable, _length, size_t) \
   nonstatic_field(G1HeapRegionTable, _biased_base, address) \
@@ -69,7 +71,8 @@
 \
   declare_type(G1CollectedHeap, SharedHeap) \
 \
-  declare_type(HeapRegion, ContiguousSpace) \
+  declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
+  declare_type(HeapRegion, G1OffsetTableContigSpace) \
   declare_toplevel_type(HeapRegionSeq) \
   declare_toplevel_type(HeapRegionSetBase) \
   declare_toplevel_type(HeapRegionSetCount) \
......
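The new `nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord*)` entry exists because the Serviceability Agent locates VM fields through exported (type, field, offset) records rather than by linking against libjvm; the Java mirror's `type.getAddressField("_top")` earlier in this commit resolves against exactly such a record. A simplified C++ illustration of the mechanism (hypothetical names, not HotSpot's actual tables):

```cpp
#include <cstddef>
#include <cstring>

struct G1OffsetTableContigSpace { void* _bottom; void* _end; void* _top; };

// One row of a vmStructs-style export table: type name, field name, offset.
struct VMFieldEntry {
  const char* type_name;
  const char* field_name;
  size_t      offset;
};

static const VMFieldEntry g_entries[] = {
  { "G1OffsetTableContigSpace", "_top",
    offsetof(G1OffsetTableContigSpace, _top) },
};

// An out-of-process debugger reads the field by adding the advertised
// offset to the object's address, as HeapRegion.java's topField does.
void* read_top(const void* space_addr) {
  void* value;
  std::memcpy(&value,
              static_cast<const char*>(space_addr) + g_entries[0].offset,
              sizeof(value));
  return value;
}
```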
@@ -684,14 +684,8 @@ size_t ContiguousSpace::block_size(const HeapWord* p) const {
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocates into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-          (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
......