Commit 0c811a79, authored by: J jmasa

Merge

@@ -2954,7 +2954,7 @@ public:
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
       MemRegion mr((HeapWord*)obj, obj->size());
-      SharedHeap::fill_region_with_object(mr);
+      CollectedHeap::fill_with_object(mr);
       _cm->clearRangeBothMaps(mr);
     }
   }
@@ -3225,7 +3225,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
     // Otherwise, try to claim it.
     block = r->par_allocate(free_words);
   } while (block == NULL);
-  SharedHeap::fill_region_with_object(MemRegion(block, free_words));
+  fill_with_object(block, free_words);
 }

 #define use_local_bitmaps 1
@@ -3619,9 +3619,8 @@ public:
       guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
                 "should contain whole object");
       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    }
-    else {
-      SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
       add_to_undo_waste(word_sz);
     }
   }
......
@@ -102,7 +102,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
     HeapWord* tmp = hr->allocate(sz);
     assert(tmp != NULL, "Humongous allocation failure");
     MemRegion mr = MemRegion(tmp, sz);
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(mr);
     hr->declare_filled_region_to_BOT(mr);
     if (i == first) {
       first_hr->set_startsHumongous();
......
@@ -51,14 +51,14 @@ void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
   if (_retained) {
     // If the buffer had been retained shorten the previous filler object.
     assert(_retained_filler.end() <= _top, "INVARIANT");
-    SharedHeap::fill_region_with_object(_retained_filler);
+    CollectedHeap::fill_with_object(_retained_filler);
     // Wasted space book-keeping, otherwise (normally) done in invalidate()
     _wasted += _retained_filler.word_size();
     _retained = false;
   }
   assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
   if (_top < _hard_end) {
-    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
+    CollectedHeap::fill_with_object(_top, _hard_end);
     if (!retain) {
       invalidate();
     } else {
@@ -155,7 +155,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
 // modifying the _next_threshold state in the BOT.
 void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                      bool contig) {
-  SharedHeap::fill_region_with_object(mr);
+  CollectedHeap::fill_with_object(mr);
   if (contig) {
     _bt.alloc_block(mr.start(), mr.end());
   } else {
@@ -171,7 +171,7 @@ HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
          "or else _true_end should be equal to _hard_end");
   assert(_retained, "or else _true_end should be equal to _hard_end");
   assert(_retained_filler.end() <= _top, "INVARIANT");
-  SharedHeap::fill_region_with_object(_retained_filler);
+  CollectedHeap::fill_with_object(_retained_filler);
   if (_top < _hard_end) {
     fill_region_with_block(MemRegion(_top, _hard_end), true);
   }
@@ -316,11 +316,9 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
       while (_top <= chunk_boundary) {
         assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                "Consequence of last card handling above.");
-        MemRegion chunk_portion(chunk_boundary, _hard_end);
-        _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
-                                          chunk_portion.end());
-        SharedHeap::fill_region_with_object(chunk_portion);
-        _hard_end = chunk_portion.start();
+        _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
+        CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
+        _hard_end = chunk_boundary;
         chunk_boundary -= ChunkSizeInWords;
       }
       _end = _hard_end - AlignmentReserve;
......
@@ -201,7 +201,7 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
            "Should contain whole object.");
     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
......
@@ -389,7 +389,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // full GC.
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
+  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();
@@ -416,16 +416,14 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
-
-  // If the unused part of the old gen cannot be filled, skip
-  // absorbing eden.
-  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
-    return false;
-  }
-
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
+
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false;  // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }

   // Take the live data from eden and set both top and end in the old gen to
@@ -441,9 +439,8 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }

   // Could update the promoted average here, but it is not typically updated at
......
@@ -275,22 +275,9 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                             HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t aligned_min_int_array_size =
-      align_object_size(typeArrayOopDesc::header_size(T_INT));
-    if (deadlength >= aligned_min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
-             "deadspace too big for Arrayoop");
-      typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int) deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int) deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {
......
@@ -36,6 +36,123 @@ class PreGCValues;
 class MoveAndUpdateClosure;
 class RefProcTaskExecutor;

+// The SplitInfo class holds the information needed to 'split' a source region
+// so that the live data can be copied to two destination *spaces*.  Normally,
+// all the live data in a region is copied to a single destination space (e.g.,
+// everything live in a region in eden is copied entirely into the old gen).
+// However, when the heap is nearly full, all the live data in eden may not fit
+// into the old gen.  Copying only some of the regions from eden to old gen
+// requires finding a region that does not contain a partial object (i.e., no
+// live object crosses the region boundary) somewhere near the last object that
+// does fit into the old gen.  Since it's not always possible to find such a
+// region, splitting is necessary for predictable behavior.
+//
+// A region is always split at the end of the partial object.  This avoids
+// additional tests when calculating the new location of a pointer, which is a
+// very hot code path.  The partial object and everything to its left will be
+// copied to another space (call it dest_space_1).  The live data to the right
+// of the partial object will be copied either within the space itself, or to a
+// different destination space (distinct from dest_space_1).
+//
+// Split points are identified during the summary phase, when region
+// destinations are computed:  data about the split, including the
+// partial_object_size, is recorded in a SplitInfo record and the
+// partial_object_size field in the summary data is set to zero.  The zeroing is
+// possible (and necessary) since the partial object will move to a different
+// destination space than anything to its right, thus the partial object should
+// not affect the locations of any objects to its right.
+//
+// The recorded data is used during the compaction phase, but only rarely:  when
+// the partial object on the split region will be copied across a destination
+// region boundary.  This test is made once each time a region is filled, and is
+// a simple address comparison, so the overhead is negligible (see
+// PSParallelCompact::first_src_addr()).
+//
+// Notes:
+//
+// Only regions with partial objects are split; a region without a partial
+// object does not need any extra bookkeeping.
+//
+// At most one region is split per space, so the amount of data required is
+// constant.
+//
+// A region is split only when the destination space would overflow.  Once that
+// happens, the destination space is abandoned and no other data (even from
+// other source spaces) is targeted to that destination space.  Abandoning the
+// destination space may leave a somewhat large unused area at the end, if a
+// large object caused the overflow.
+//
+// Future work:
+//
+// More bookkeeping would be required to continue to use the destination space.
+// The most general solution would allow data from regions in two different
+// source spaces to be "joined" in a single destination region.  At the very
+// least, additional code would be required in next_src_region() to detect the
+// join and skip to an out-of-order source region.  If the join region was also
+// the last destination region to which a split region was copied (the most
+// likely case), then additional work would be needed to get fill_region() to
+// stop iteration and switch to a new source region at the right point.  Basic
+// idea would be to use a fake value for the top of the source space.  It is
+// doable, if a bit tricky.
+//
+// A simpler (but less general) solution would fill the remainder of the
+// destination region with a dummy object and continue filling the next
+// destination region.
+
+class SplitInfo
+{
+public:
+  // Return true if this split info is valid (i.e., if a split has been
+  // recorded).  The very first region cannot have a partial object and thus is
+  // never split, so 0 is the 'invalid' value.
+  bool is_valid() const { return _src_region_idx > 0; }
+
+  // Return true if this split holds data for the specified source region.
+  inline bool is_split(size_t source_region) const;
+
+  // The index of the split region, the size of the partial object on that
+  // region and the destination of the partial object.
+  size_t    src_region_idx() const   { return _src_region_idx; }
+  size_t    partial_obj_size() const { return _partial_obj_size; }
+  HeapWord* destination() const      { return _destination; }
+
+  // The destination count of the partial object referenced by this split
+  // (either 1 or 2).  This must be added to the destination count of the
+  // remainder of the source region.
+  unsigned int destination_count() const { return _destination_count; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of the destination region;
+  // otherwise this is NULL.
+  HeapWord* dest_region_addr() const { return _dest_region_addr; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of that word within the partial
+  // object; otherwise this is NULL.
+  HeapWord* first_src_addr() const { return _first_src_addr; }
+
+  // Record the data necessary to split the region src_region_idx.
+  void record(size_t src_region_idx, size_t partial_obj_size,
+              HeapWord* destination);
+
+  void clear();
+
+  DEBUG_ONLY(void verify_clear();)
+
+private:
+  size_t       _src_region_idx;
+  size_t       _partial_obj_size;
+  HeapWord*    _destination;
+  unsigned int _destination_count;
+  HeapWord*    _dest_region_addr;
+  HeapWord*    _first_src_addr;
+};
+
+inline bool SplitInfo::is_split(size_t region_idx) const
+{
+  return _src_region_idx == region_idx && is_valid();
+}
+
 class SpaceInfo
 {
  public:
@@ -58,18 +175,23 @@ class SpaceInfo
   // is no start array.
   ObjectStartArray* start_array() const { return _start_array; }

+  SplitInfo& split_info() { return _split_info; }
+
   void set_space(MutableSpace* s)           { _space = s; }
   void set_new_top(HeapWord* addr)          { _new_top = addr; }
   void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
   void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
   void set_start_array(ObjectStartArray* s) { _start_array = s; }

+  void publish_new_top() const { _space->set_top(_new_top); }
+
  private:
   MutableSpace*     _space;
   HeapWord*         _new_top;
   HeapWord*         _min_dense_prefix;
   HeapWord*         _dense_prefix;
   ObjectStartArray* _start_array;
+  SplitInfo         _split_info;
 };

 class ParallelCompactData
@@ -230,9 +352,14 @@ public:
   // must be region-aligned; end need not be.
   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

-  bool summarize(HeapWord* target_beg, HeapWord* target_end,
+  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
+                                  HeapWord* destination, HeapWord* target_end,
+                                  HeapWord** target_next);
+  bool summarize(SplitInfo& split_info,
                  HeapWord* source_beg, HeapWord* source_end,
-                 HeapWord** target_next, HeapWord** source_next = 0);
+                 HeapWord** source_next,
+                 HeapWord* target_beg, HeapWord* target_end,
+                 HeapWord** target_next);

   void clear();
   void clear_range(size_t beg_region, size_t end_region);
@@ -838,13 +965,27 @@ class PSParallelCompact : AllStatic {
   // non-empty.
   static void fill_dense_prefix_end(SpaceId id);

+  // Clear the summary data source_region field for the specified addresses.
+  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
+
+#ifndef PRODUCT
+  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
+
+  // Fill the region [start, start + words) with live object(s).  Only usable
+  // for the old and permanent generations.
+  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
+                                     size_t words);
+
+  // Include the new objects in the summary data.
+  static void summarize_new_objects(SpaceId id, HeapWord* start);
+
+  // Add live objects and/or choose the dense prefix to provoke splitting.
+  static void provoke_split(bool & maximum_compaction);
+#endif
+
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

+  // The space that is compacted after space_id.
+  static SpaceId next_compaction_space_id(SpaceId space_id);
+
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();
@@ -999,6 +1140,7 @@ class PSParallelCompact : AllStatic {
   // Return the address of the word to be copied to dest_addr, which must be
   // aligned to a region boundary.
   static HeapWord* first_src_addr(HeapWord* const dest_addr,
+                                  SpaceId src_space_id,
                                   size_t src_region_idx);

   // Determine the next source region, set closure.source() to the start of the
@@ -1081,6 +1223,10 @@
                                           const SpaceId id,
                                           const bool maximum_compaction,
                                           HeapWord* const addr);
+  static void summary_phase_msg(SpaceId dst_space_id,
+                                HeapWord* dst_beg, HeapWord* dst_end,
+                                SpaceId src_space_id,
+                                HeapWord* src_beg, HeapWord* src_end);
 #endif  // #ifndef PRODUCT

 #ifdef ASSERT
@@ -1324,31 +1470,28 @@ inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
   oop(addr)->update_contents(compaction_manager());
 }

-class FillClosure: public ParMarkBitMapClosure {
-public:
+class FillClosure: public ParMarkBitMapClosure
+{
+public:
   FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
-    _space_id(space_id),
-    _start_array(PSParallelCompact::start_array(space_id)) {
-    assert(_space_id == PSParallelCompact::perm_space_id ||
-           _space_id == PSParallelCompact::old_space_id,
+    _start_array(PSParallelCompact::start_array(space_id))
+  {
+    assert(space_id == PSParallelCompact::perm_space_id ||
+           space_id == PSParallelCompact::old_space_id,
            "cannot use FillClosure in the young gen");
-    assert(bitmap() != NULL, "need a bitmap");
-    assert(_start_array != NULL, "need a start array");
-  }
-
-  void fill_region(HeapWord* addr, size_t size) {
-    MemRegion region(addr, size);
-    SharedHeap::fill_region_with_object(region);
-    _start_array->allocate_block(addr);
   }

   virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
-    fill_region(addr, size);
+    CollectedHeap::fill_with_objects(addr, size);
+    HeapWord* const end = addr + size;
+    do {
+      _start_array->allocate_block(addr);
+      addr += oop(addr)->size();
+    } while (addr < end);
     return ParMarkBitMap::incomplete;
   }

  private:
-  const PSParallelCompact::SpaceId _space_id;
-  ObjectStartArray* const _start_array;
+  ObjectStartArray* const _start_array;
 };
@@ -499,26 +499,15 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
         // We lost, someone else "owns" this object
         guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

-        // Unallocate the space used. NOTE! We may have directly allocated
-        // the object. If so, we cannot deallocate it, so we have to test!
+        // Try to deallocate the space.  If it was directly allocated we cannot
+        // deallocate it, so we have to test.  If the deallocation fails,
+        // overwrite with a filler object.
         if (new_obj_is_tenured) {
           if (!_old_lab.unallocate_object(new_obj)) {
-            // The promotion lab failed to unallocate the object.
-            // We need to overwrite the object with a filler that
-            // contains no interior pointers.
-            MemRegion mr((HeapWord*)new_obj, new_obj_size);
-            // Clean this up and move to oopFactory (see bug 4718422)
-            SharedHeap::fill_region_with_object(mr);
-          }
-        } else {
-          if (!_young_lab.unallocate_object(new_obj)) {
-            // The promotion lab failed to unallocate the object.
-            // We need to overwrite the object with a filler that
-            // contains no interior pointers.
-            MemRegion mr((HeapWord*)new_obj, new_obj_size);
-            // Clean this up and move to oopFactory (see bug 4718422)
-            SharedHeap::fill_region_with_object(mr);
+            CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
           }
+        } else if (!_young_lab.unallocate_object(new_obj)) {
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
         }

         // don't update this before the unallocation!
......
@@ -76,8 +76,8 @@ void MutableNUMASpace::ensure_parsability() {
     MutableSpace *s = ls->space();
     if (s->top() < top()) { // For all spaces preceeding the one containing top()
       if (s->free_in_words() > 0) {
-        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
         size_t area_touched_words = pointer_delta(s->end(), s->top());
+        CollectedHeap::fill_with_object(s->top(), area_touched_words);
 #ifndef ASSERT
         if (!ZapUnusedHeapArea) {
           area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
@@ -686,11 +686,11 @@ void MutableNUMASpace::set_top(HeapWord* value) {
       // a minimal object; assuming that's not the last chunk in which case we don't care.
       if (i < lgrp_spaces()->length() - 1) {
         size_t remainder = pointer_delta(s->end(), value);
-        const size_t minimal_object_size = oopDesc::header_size();
-        if (remainder < minimal_object_size && remainder > 0) {
-          // Add a filler object of a minimal size, it will cross the chunk boundary.
-          SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size));
-          value += minimal_object_size;
+        const size_t min_fill_size = CollectedHeap::min_fill_size();
+        if (remainder < min_fill_size && remainder > 0) {
+          // Add a minimum size filler object; it will cross the chunk boundary.
+          CollectedHeap::fill_with_object(value, min_fill_size);
+          value += min_fill_size;
           assert(!s->contains(value), "Should be in the next chunk");
           // Restart the loop from the same chunk, since the value has moved
           // to the next one.
......
@@ -30,12 +30,21 @@
 int CollectedHeap::_fire_out_of_memory_count = 0;
 #endif

+size_t CollectedHeap::_filler_array_max_size = 0;
+
 // Memory state functions.

-CollectedHeap::CollectedHeap() :
-  _reserved(), _barrier_set(NULL), _is_gc_active(false),
-  _total_collections(0), _total_full_collections(0),
-  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
+CollectedHeap::CollectedHeap()
+{
+  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
+  const size_t elements_per_word = HeapWordSize / sizeof(jint);
+  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
+                                             max_len * elements_per_word);
+
+  _barrier_set = NULL;
+  _is_gc_active = false;
+  _total_collections = _total_full_collections = 0;
+  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
@@ -128,6 +137,95 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
   return obj;
 }

+size_t CollectedHeap::filler_array_hdr_size() {
+  return size_t(arrayOopDesc::header_size(T_INT));
+}
+
+size_t CollectedHeap::filler_array_min_size() {
+  return align_object_size(filler_array_hdr_size());
+}
+
+size_t CollectedHeap::filler_array_max_size() {
+  return _filler_array_max_size;
+}
+
+#ifdef ASSERT
+void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
+{
+  assert(words >= min_fill_size(), "too small to fill");
+  assert(words % MinObjAlignment == 0, "unaligned size");
+  assert(Universe::heap()->is_in_reserved(start), "not in heap");
+  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
+}
+
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+{
+  if (ZapFillerObjects) {
+    Copy::fill_to_words(start + filler_array_hdr_size(),
+                        words - filler_array_hdr_size(), 0XDEAFBABE);
+  }
+}
+#endif // ASSERT
+
+void
+CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+{
+  assert(words >= filler_array_min_size(), "too small for an array");
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  const size_t payload_size = words - filler_array_hdr_size();
+  const size_t len = payload_size * HeapWordSize / sizeof(jint);
+
+  // Set the length first for concurrent GC.
+  ((arrayOop)start)->set_length((int)len);
+  post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
+                               words);
+  DEBUG_ONLY(zap_filler_array(start, words);)
+}
+
+void
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+{
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  if (words >= filler_array_min_size()) {
+    fill_with_array(start, words);
+  } else if (words > 0) {
+    assert(words == min_fill_size(), "unaligned size");
+    post_allocation_setup_common(SystemDictionary::object_klass(), start,
+                                 words);
+  }
+}
+
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+  fill_with_object_impl(start, words);
+}
+
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+
+#ifdef LP64
+  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
+  // First fill with arrays, ensuring that any remaining space is big enough to
+  // fill.  The remainder is filled with a single object.
+  const size_t min = min_fill_size();
+  const size_t max = filler_array_max_size();
+  while (words > max) {
+    const size_t cur = words - max >= min ? max : max - min;
+    fill_with_array(start, cur);
+    start += cur;
+    words -= cur;
+  }
+#endif
+
+  fill_with_object_impl(start, words);
+}
+
 oop CollectedHeap::new_store_barrier(oop new_obj) {
   // %%% This needs refactoring. (It was imported from the server compiler.)
   guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
......
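The fill_with_objects() loop above only matters on 64-bit, where one region can exceed what a single int-array filler covers. The point of the "words - max >= min ? max : max - min" step is that the space remaining after each chunk is never positive but smaller than min_fill_size(), so the tail can always be covered by one last filler object. Below is a stand-alone sketch of that arithmetic, not part of the changeset; the word-size constants are invented stand-ins for min_fill_size() and filler_array_max_size().

#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in constants (hypothetical, chosen small so the chunking is visible):
// a 2-word minimum filler and a 10-word maximum filler array.
static const size_t kMinFill = 2;
static const size_t kMaxFill = 10;

// Mirrors the LP64 loop in CollectedHeap::fill_with_objects(): carve off
// max-sized chunks, but shrink a chunk by kMinFill when taking a full one
// would leave a remainder smaller than the minimum filler size.
static std::vector<size_t> chunk_filler_sizes(size_t words) {
  std::vector<size_t> chunks;
  while (words > kMaxFill) {
    const size_t cur = (words - kMaxFill >= kMinFill) ? kMaxFill
                                                      : kMaxFill - kMinFill;
    chunks.push_back(cur);
    words -= cur;
  }
  if (words > 0) chunks.push_back(words);  // one object covers the remainder
  return chunks;
}

int main() {
  for (size_t words : {7u, 10u, 11u, 25u}) {
    std::printf("%zu words ->", words);
    for (size_t c : chunk_filler_sizes(words)) std::printf(" %zu", c);
    std::printf("\n");
  }
  return 0;
}

For 11 words, for example, a full 10-word chunk would leave a 1-word tail (unfillable), so the loop takes 8 words instead and finishes with a 3-word filler.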
@@ -47,6 +47,9 @@ class CollectedHeap : public CHeapObj {
   static int _fire_out_of_memory_count;
 #endif

+  // Used for filler objects (static, but initialized in ctor).
+  static size_t _filler_array_max_size;
+
  protected:
   MemRegion _reserved;
   BarrierSet* _barrier_set;
@@ -119,6 +122,21 @@ class CollectedHeap : public CHeapObj {
   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);

+  // Filler object utilities.
+  static inline size_t filler_array_hdr_size();
+  static inline size_t filler_array_min_size();
+  static inline size_t filler_array_max_size();
+
+  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
+  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+
+  // Fill with a single array; caller must ensure filler_array_min_size() <=
+  // words <= filler_array_max_size().
+  static inline void fill_with_array(HeapWord* start, size_t words);
+
+  // Fill with a single object (either an int array or a java.lang.Object).
+  static inline void fill_with_object_impl(HeapWord* start, size_t words);
+
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
     PRODUCT_RETURN;
@@ -294,6 +312,27 @@ class CollectedHeap : public CHeapObj {
   // The boundary between a "large" and "small" array of primitives, in words.
   virtual size_t large_typearray_limit() = 0;

+  // Utilities for turning raw memory into filler objects.
+  //
+  // min_fill_size() is the smallest region that can be filled.
+  // fill_with_objects() can fill arbitrary-sized regions of the heap using
+  // multiple objects.  fill_with_object() is for regions known to be smaller
+  // than the largest array of integers; it uses a single object to fill the
+  // region and has slightly less overhead.
+  static size_t min_fill_size() {
+    return size_t(align_object_size(oopDesc::header_size()));
+  }
+
+  static void fill_with_objects(HeapWord* start, size_t words);
+
+  static void fill_with_object(HeapWord* start, size_t words);
+  static void fill_with_object(MemRegion region) {
+    fill_with_object(region.start(), region.word_size());
+  }
+  static void fill_with_object(HeapWord* start, HeapWord* end) {
+    fill_with_object(start, pointer_delta(end, start));
+  }
+
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
......
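For reference, the three fill_with_object() overloads declared above all funnel into the (start, words) form; the MemRegion and (start, end) variants just convert their arguments, which is why callers throughout this changeset can pass whichever shape they already have. The stand-alone sketch below shows the same delegation with stand-in types (a plain word-sized typedef and a tiny MemRegion struct, not the HotSpot declarations):

#include <cstddef>
#include <cstdio>

typedef unsigned long HeapWord;   // stand-in: one heap word

struct MemRegion {                // stand-in region: start + size in words
  HeapWord* _start;
  size_t    _word_size;
  HeapWord* start() const     { return _start; }
  size_t    word_size() const { return _word_size; }
};

// The (start, words) form is the workhorse.
static void fill_with_object(HeapWord* start, size_t words) {
  std::printf("fill %zu words at %p\n", words, (void*)start);
}

// The other overloads convert and delegate, as in the new interface.
static void fill_with_object(MemRegion region) {
  fill_with_object(region.start(), region.word_size());
}

static void fill_with_object(HeapWord* start, HeapWord* end) {
  fill_with_object(start, (size_t)(end - start));  // pointer delta, in words
}

int main() {
  HeapWord heap[16] = {0};
  MemRegion r = { heap, 16 };
  fill_with_object(r);                // MemRegion form
  fill_with_object(heap, heap + 8);   // [start, end) form
  fill_with_object(heap, (size_t)4);  // (start, words) form
  return 0;
}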
@@ -34,7 +34,6 @@ void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size) {
-
   oop obj = (oop)objPtr;
   assert(obj != NULL, "NULL object pointer");
@@ -44,9 +43,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
     // May be bootstrapping
     obj->set_mark(markOopDesc::prototype());
   }
-
-  // support low memory notifications (no-op if not enabled)
-  LowMemoryDetector::detect_low_memory_for_collected_pools();
 }

 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
@@ -65,6 +61,9 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
 // Support for jvmti and dtrace
 inline void post_allocation_notify(KlassHandle klass, oop obj) {
+  // support low memory notifications (no-op if not enabled)
+  LowMemoryDetector::detect_low_memory_for_collected_pools();
+
   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
   JvmtiExport::vm_object_alloc_event_collector(obj);
......
@@ -28,21 +28,22 @@ collectedHeap.cpp collectedHeap.hpp
 collectedHeap.cpp                       collectedHeap.inline.hpp
 collectedHeap.cpp                       init.hpp
 collectedHeap.cpp                       oop.inline.hpp
+collectedHeap.cpp                       systemDictionary.hpp
 collectedHeap.cpp                       thread_<os_family>.inline.hpp
 collectedHeap.hpp                       allocation.hpp
 collectedHeap.hpp                       barrierSet.hpp
 collectedHeap.hpp                       gcCause.hpp
 collectedHeap.hpp                       handles.hpp
 collectedHeap.hpp                       perfData.hpp
 collectedHeap.hpp                       safepoint.hpp
 collectedHeap.inline.hpp                arrayOop.hpp
 collectedHeap.inline.hpp                collectedHeap.hpp
 collectedHeap.inline.hpp                copy.hpp
 collectedHeap.inline.hpp                jvmtiExport.hpp
 collectedHeap.inline.hpp                lowMemoryDetector.hpp
 collectedHeap.inline.hpp                sharedRuntime.hpp
 collectedHeap.inline.hpp                thread.hpp
 collectedHeap.inline.hpp                threadLocalAllocBuffer.inline.hpp
 collectedHeap.inline.hpp                universe.hpp
......
@@ -26,20 +26,24 @@
 #include "incls/_permGen.cpp.incl"

 HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
-  MutexLocker ml(Heap_lock);
   GCCause::Cause next_cause = GCCause::_permanent_generation_full;
   GCCause::Cause prev_cause = GCCause::_no_gc;
+  unsigned int gc_count_before, full_gc_count_before;
+  HeapWord* obj;
+
   for (;;) {
-    HeapWord* obj = gen->allocate(size, false);
-    if (obj != NULL) {
-      return obj;
-    }
-    if (gen->capacity() < _capacity_expansion_limit ||
-        prev_cause != GCCause::_no_gc) {
-      obj = gen->expand_and_allocate(size, false);
-    }
-    if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+    {
+      MutexLocker ml(Heap_lock);
+      if ((obj = gen->allocate(size, false)) != NULL) {
+        return obj;
+      }
+      if (gen->capacity() < _capacity_expansion_limit ||
+          prev_cause != GCCause::_no_gc) {
+        obj = gen->expand_and_allocate(size, false);
+      }
+      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
+        return obj;
+      }
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
@@ -61,31 +65,27 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
           return NULL;
         }
       }
+
       // Read the GC count while holding the Heap_lock
-      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-      unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
-      {
-        MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-        VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
-                                               next_cause);
-        VMThread::execute(&op);
-        if (!op.prologue_succeeded() || op.gc_locked()) {
-          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
-          continue;  // retry and/or stall as necessary
-        }
-        obj = op.result();
-        assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
-               "result not in heap");
-        if (obj != NULL) {
-          return obj;
-        }
-      }
-      prev_cause = next_cause;
-      next_cause = GCCause::_last_ditch_collection;
-    } else {
+      gc_count_before = SharedHeap::heap()->total_collections();
+      full_gc_count_before = SharedHeap::heap()->total_full_collections();
+    }
+
+    // Give up heap lock above, VMThread::execute below gets it back
+    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
+                                           next_cause);
+    VMThread::execute(&op);
+    if (!op.prologue_succeeded() || op.gc_locked()) {
+      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+      continue;  // retry and/or stall as necessary
+    }
+    obj = op.result();
+    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+           "result not in heap");
+    if (obj != NULL) {
       return obj;
     }
+    prev_cause = next_cause;
+    next_cause = GCCause::_last_ditch_collection;
   }
 }
......
@@ -248,46 +248,6 @@ void SharedHeap::ref_processing_init() {
   perm_gen()->ref_processor_init();
 }

-void SharedHeap::fill_region_with_object(MemRegion mr) {
-  // Disable the posting of JVMTI VMObjectAlloc events as we
-  // don't want the filling of tlabs with filler arrays to be
-  // reported to the profiler.
-  NoJvmtiVMObjectAllocMark njm;
-
-  // Disable low memory detector because there is no real allocation.
-  LowMemoryDetectorDisabler lmd_dis;
-
-  // It turns out that post_allocation_setup_array takes a handle, so the
-  // call below contains an implicit conversion. Best to free that handle
-  // as soon as possible.
-  HandleMark hm;
-
-  size_t word_size = mr.word_size();
-  size_t aligned_array_header_size =
-    align_object_size(typeArrayOopDesc::header_size(T_INT));
-
-  if (word_size >= aligned_array_header_size) {
-    const size_t array_length =
-      pointer_delta(mr.end(), mr.start()) -
-      typeArrayOopDesc::header_size(T_INT);
-    const size_t array_length_words =
-      array_length * (HeapWordSize/sizeof(jint));
-    post_allocation_setup_array(Universe::intArrayKlassObj(),
-                                mr.start(),
-                                mr.word_size(),
-                                (int)array_length_words);
-#ifdef ASSERT
-    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
-    Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
-#endif
-  } else {
-    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
-    post_allocation_setup_obj(SystemDictionary::object_klass(),
-                              mr.start(),
-                              mr.word_size());
-  }
-}
-
 // Some utilities.
 void SharedHeap::print_size_transition(outputStream* out,
                                        size_t bytes_before,
......
@@ -108,14 +108,6 @@ public:
   void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; }

-  // A helper function that fills a region of the heap with
-  // with a single object.
-  static void fill_region_with_object(MemRegion mr);
-
-  // Minimum garbage fill object size
-  static size_t min_fill_size()          { return (size_t)align_object_size(oopDesc::header_size()); }
-  static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; }
-
   // This function returns the "GenRemSet" object that allows us to scan
   // generations; at least the perm gen, possibly more in a fully
   // generational heap.
......
@@ -409,19 +409,9 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                         HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-    if (deadlength >= min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int) deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int) deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {
......
@@ -387,7 +387,7 @@ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
            "should contain whole object");
     buf->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
......
@@ -100,8 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
-    MemRegion mr(top(), hard_end());
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(top(), hard_end());

     if (retire || ZeroTLAB) {  // "Reset" the TLAB
       set_start(NULL);
......
@@ -49,16 +49,17 @@ klassOop Universe::_constantPoolKlassObj = NULL;
 klassOop Universe::_constantPoolCacheKlassObj = NULL;
 klassOop Universe::_compiledICHolderKlassObj  = NULL;
 klassOop Universe::_systemObjArrayKlassObj    = NULL;
+klassOop Universe::_fillerArrayKlassObj       = NULL;
 oop Universe::_int_mirror    = NULL;
 oop Universe::_float_mirror  = NULL;
 oop Universe::_double_mirror = NULL;
 oop Universe::_byte_mirror   = NULL;
 oop Universe::_bool_mirror   = NULL;
 oop Universe::_char_mirror   = NULL;
 oop Universe::_long_mirror   = NULL;
 oop Universe::_short_mirror  = NULL;
 oop Universe::_void_mirror   = NULL;
 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
 oop Universe::_main_thread_group   = NULL;
 oop Universe::_system_thread_group = NULL;
 typeArrayOop Universe::_the_empty_byte_array = NULL;
@@ -126,6 +127,7 @@ void Universe::system_classes_do(void f(klassOop)) {
   f(instanceKlassKlassObj());
   f(constantPoolKlassObj());
   f(systemObjArrayKlassObj());
+  f(fillerArrayKlassObj());
 }

 void Universe::oops_do(OopClosure* f, bool do_all) {
@@ -180,6 +182,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
   f->do_oop((oop*)&_constantPoolCacheKlassObj);
   f->do_oop((oop*)&_compiledICHolderKlassObj);
   f->do_oop((oop*)&_systemObjArrayKlassObj);
+  f->do_oop((oop*)&_fillerArrayKlassObj);
   f->do_oop((oop*)&_the_empty_byte_array);
   f->do_oop((oop*)&_the_empty_short_array);
   f->do_oop((oop*)&_the_empty_int_array);
@@ -257,16 +260,17 @@ void Universe::genesis(TRAPS) {
     _typeArrayKlassObjs[T_INT]  = _intArrayKlassObj;
     _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
     _methodKlassObj             = methodKlass::create_klass(CHECK);
     _constMethodKlassObj        = constMethodKlass::create_klass(CHECK);
     _methodDataKlassObj         = methodDataKlass::create_klass(CHECK);
     _constantPoolKlassObj       = constantPoolKlass::create_klass(CHECK);
     _constantPoolCacheKlassObj  = constantPoolCacheKlass::create_klass(CHECK);
     _compiledICHolderKlassObj   = compiledICHolderKlass::create_klass(CHECK);
     _systemObjArrayKlassObj     = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
+    _fillerArrayKlassObj        = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
     _the_empty_byte_array       = oopFactory::new_permanent_byteArray(0, CHECK);
     _the_empty_short_array      = oopFactory::new_permanent_shortArray(0, CHECK);
     _the_empty_int_array        = oopFactory::new_permanent_intArray(0, CHECK);
     _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK);
@@ -274,7 +278,6 @@ void Universe::genesis(TRAPS) {
     _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
     _vm_exception               = oopFactory::new_symbol("vm exception holder", CHECK);
   } else {
-
     FileMapInfo *mapinfo = FileMapInfo::current_info();
     char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
     void** vtbl_list = (void**)buffer;
......
@@ -92,6 +92,7 @@ class LatestMethodOopCache : public CommonMethodOopCache {

 class Universe: AllStatic {
+  // Ugh.  Universe is much too friendly.
   friend class MarkSweep;
   friend class oopDesc;
   friend class ClassLoader;
@@ -132,6 +133,7 @@ class Universe: AllStatic {
   static klassOop _constantPoolCacheKlassObj;
   static klassOop _compiledICHolderKlassObj;
   static klassOop _systemObjArrayKlassObj;
+  static klassOop _fillerArrayKlassObj;

   // Known objects in the VM
@@ -264,6 +266,7 @@ class Universe: AllStatic {
   static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
   static klassOop compiledICHolderKlassObj()  { return _compiledICHolderKlassObj; }
   static klassOop systemObjArrayKlassObj()    { return _systemObjArrayKlassObj; }
+  static klassOop fillerArrayKlassObj()       { return _fillerArrayKlassObj; }

   // Known objects in tbe VM
   static oop int_mirror()                  { return check_mirror(_int_mirror);
......
@@ -96,19 +96,20 @@ class arrayOopDesc : public oopDesc {
                                  : typesize_in_bytes/HeapWordSize);
   }

-  // This method returns the maximum length that can passed into
-  // typeArrayOop::object_size(scale, length, header_size) without causing an
-  // overflow. We substract an extra 2*wordSize to guard against double word
-  // alignments.  It gets the scale from the type2aelembytes array.
+  // Return the maximum length of an array of BasicType.  The length can passed
+  // to typeArrayOop::object_size(scale, length, header_size) without causing an
+  // overflow.
   static int32_t max_array_length(BasicType type) {
     assert(type >= 0 && type < T_CONFLICT, "wrong type");
     assert(type2aelembytes(type) != 0, "wrong type");
-    // We use max_jint, since object_size is internally represented by an 'int'
-    // This gives us an upper bound of max_jint words for the size of the oop.
-    int32_t max_words = (max_jint - header_size(type) - 2);
-    int elembytes = type2aelembytes(type);
-    jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
-    return (len > max_jint) ? max_jint : (int32_t)len;
-  }
+    const int bytes_per_element = type2aelembytes(type);
+    if (bytes_per_element < HeapWordSize) {
+      return max_jint;
+    }
+
+    const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
+    const int32_t max_element_words = max_words - header_size(type);
+    const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
+    return max_element_words / words_per_element;
+  }
 };
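A quick worked example of the new bound: for element types smaller than a heap word the jint length field is the limiting factor (even max_jint such elements keep the object size, in words, below max_jint), while for word-sized or larger elements the object size in words, capped at max_jint, limits the length. The stand-alone sketch below redoes the arithmetic with assumed LP64 stand-in constants (8-byte heap words, 1-word minimum object alignment, a 2-word array header); the real values come from the VM's globals and header_size(type).

#include <cstdint>
#include <cstdio>

static const int32_t kMaxJint          = 0x7fffffff;
static const int     kHeapWordSize     = 8;   // assumed LP64 stand-in
static const int     kLogHeapWordSize  = 3;   // log2(kHeapWordSize)
static const int32_t kMinObjAlignment  = 1;   // in words, assumed
static const int32_t kArrayHeaderWords = 2;   // assumed array header size

static int32_t max_array_length(int bytes_per_element) {
  // Small elements: the jint length field is reached before the
  // size-in-words limit, so the answer is simply max_jint.
  if (bytes_per_element < kHeapWordSize) {
    return kMaxJint;
  }
  // Otherwise cap the whole object at max_jint words (object_size() returns
  // an int) and divide what is left after the header among the elements.
  const int32_t max_words = (kMaxJint / kMinObjAlignment) * kMinObjAlignment;
  const int32_t max_element_words = max_words - kArrayHeaderWords;
  const int32_t words_per_element = bytes_per_element >> kLogHeapWordSize;
  return max_element_words / words_per_element;
}

int main() {
  std::printf("1 byte/elem  (e.g. T_BYTE): %d\n", max_array_length(1));  // max_jint
  std::printf("4 bytes/elem (e.g. T_INT):  %d\n", max_array_length(4));  // max_jint
  std::printf("8 bytes/elem (e.g. T_LONG): %d\n", max_array_length(8));  // just under max_jint
  return 0;
}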
@@ -36,13 +36,14 @@ bool typeArrayKlass::compute_is_subtype_of(klassOop k) {
   return element_type() == tak->element_type();
 }

-klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) {
+klassOop typeArrayKlass::create_klass(BasicType type, int scale,
+                                      const char* name_str, TRAPS) {
   typeArrayKlass o;

   symbolHandle sym(symbolOop(NULL));
   // bootstrapping: don't create sym if symbolKlass not created yet
-  if (Universe::symbolKlassObj() != NULL) {
-    sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL);
+  if (Universe::symbolKlassObj() != NULL && name_str != NULL) {
+    sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL);
   }

   KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj());
......
@@ -39,7 +39,11 @@ class typeArrayKlass : public arrayKlass {
   // klass allocation
   DEFINE_ALLOCATE_PERMANENT(typeArrayKlass);
-  static klassOop create_klass(BasicType type, int scale, TRAPS);
+  static klassOop create_klass(BasicType type, int scale, const char* name_str,
+                               TRAPS);
+  static inline klassOop create_klass(BasicType type, int scale, TRAPS) {
+    return create_klass(type, scale, external_name(type), CHECK_NULL);
+  }

   int oop_size(oop obj) const;
   int klass_oop_size() const  { return object_size(); }
......
@@ -1517,6 +1517,16 @@ bool Arguments::check_vm_args_consistency() {
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }

+  if (UseParallelOldGC && ParallelOldGCSplitALot) {
+    // Settings to encourage splitting.
+    if (!FLAG_IS_CMDLINE(NewRatio)) {
+      FLAG_SET_CMDLINE(intx, NewRatio, 2);
+    }
+    if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
+      FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+    }
+  }
+
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
   status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
   if (GCTimeLimit == 100) {
......
@@ -625,6 +625,9 @@ class CommandLineFlags {
   develop(bool, CheckZapUnusedHeapArea, false,                              \
           "Check zapping of unused heap space")                             \
                                                                             \
+  develop(bool, ZapFillerObjects, trueInDebug,                              \
+          "Zap filler objects with 0xDEAFBABE")                             \
+                                                                            \
   develop(bool, PrintVMMessages, true,                                      \
           "Print vm messages on console")                                   \
                                                                             \
@@ -1200,11 +1203,12 @@ class CommandLineFlags {
   product(uintx, ParallelCMSThreads, 0,                                     \
           "Max number of threads CMS will use for concurrent work")         \
                                                                             \
-  develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                       \
-          "Use the Parallel Old MT unsafe in marking the bitmap")           \
+  develop(bool, ParallelOldGCSplitALot, false,                              \
+          "Provoke splitting (copying data from a young gen space to"       \
+          "multiple destination spaces)")                                   \
                                                                             \
-  develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                   \
-          "Use the Parallel Old MT unsafe in update of live size")          \
+  develop(uintx, ParallelOldGCSplitInterval, 3,                             \
+          "How often to provoke splitting a young gen space")               \
                                                                             \
   develop(bool, TraceRegionTasksQueuing, false,                             \
           "Trace the queuing of the region tasks")                          \
......