Commit 9f0ea04c authored by: J jmasa

Merge

@@ -726,7 +726,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
result += partial_obj_size + live_to_left;
assert(result <= addr, "object cannot move to the right");
DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
return result;
}
@@ -1472,9 +1472,53 @@ PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
assert(result, "should not fail: bad filler object size");
}
void
PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
{
if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
return;
}
MutableSpace* const space = _space_info[id].space();
if (space->is_empty()) {
HeapWord* b = space->bottom();
HeapWord* t = b + space->capacity_in_words() / 2;
space->set_top(t);
if (ZapUnusedHeapArea) {
space->set_top_for_allocations();
}
size_t obj_len = 8;
while (b + obj_len <= t) {
CollectedHeap::fill_with_object(b, obj_len);
mark_bitmap()->mark_obj(b, obj_len);
summary_data().add_obj(b, obj_len);
b += obj_len;
obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
}
if (b < t) {
// The loop didn't completely fill to t (top); adjust top downward.
space->set_top(b);
if (ZapUnusedHeapArea) {
space->set_top_for_allocations();
}
}
HeapWord** nta = _space_info[id].new_top_addr();
bool result = summary_data().summarize(_space_info[id].split_info(),
space->bottom(), space->top(), NULL,
space->bottom(), space->end(), nta);
assert(result, "space must fit into itself");
}
}
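
Aside: the filler-length expression `obj_len = (obj_len & 0x18) + 8` in the loop above cycles through 8, 16, 24, 32 words and wraps back to 8, because `0x18` keeps only the two bits that distinguish those sizes (32 masks to 0, producing the wrap). A minimal standalone check of that arithmetic, not part of the patch:

```cpp
#include <cstdio>

int main() {
  unsigned len = 8;
  for (int i = 0; i < 8; ++i) {
    std::printf("%u ", len);   // prints: 8 16 24 32 8 16 24 32
    len = (len & 0x18) + 8;    // same expression as in the patch
  }
  std::printf("\n");
  return 0;
}
```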
void
PSParallelCompact::provoke_split(bool & max_compaction)
{
if (total_invocations() % ParallelOldGCSplitInterval != 0) {
return;
}
const size_t region_size = ParallelCompactData::RegionSize;
ParallelCompactData& sd = summary_data();
@@ -1587,6 +1631,12 @@ void PSParallelCompact::summarize_spaces_quick()
assert(result, "space must fit into itself");
_space_info[i].set_dense_prefix(space->bottom());
}
#ifndef PRODUCT
if (ParallelOldGCSplitALot) {
provoke_split_fill_survivor(to_space_id);
}
#endif // #ifndef PRODUCT
}
void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
@@ -1794,9 +1844,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
}
#ifndef PRODUCT
if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
if (total_invocations() % ParallelOldGCSplitInterval == 0) {
provoke_split(maximum_compaction);
}
provoke_split(maximum_compaction);
}
#endif // #ifndef PRODUCT
@@ -1828,14 +1876,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
new_top_addr);
assert(done, "space must fit into old gen");
// XXX - this is necessary because decrement_destination_counts() tests
// source_region() to determine if a region will be filled. Probably
// better to pass src_space->new_top() into decrement_destination_counts
// and test that instead.
//
// Clear the source_region field for each region in the space.
clear_source_region(space->bottom(), _space_info[id].new_top());
// Reset the new_top value for the space.
_space_info[id].set_new_top(space->bottom());
} else if (live > 0) {
@@ -1854,7 +1894,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
dst_space_id = SpaceId(id);
dst_space_end = space->end();
new_top_addr = _space_info[id].new_top_addr();
HeapWord* const clear_end = _space_info[id].new_top();
NOT_PRODUCT(summary_phase_msg(dst_space_id,
space->bottom(), dst_space_end,
SpaceId(id), next_src_addr, space->top());)
@@ -1865,13 +1904,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
new_top_addr);
assert(done, "space must fit when compacted into itself");
assert(*new_top_addr <= space->top(), "usage should not grow");
// XXX - this should go away. See comments above.
//
// Clear the source_region field in regions at the end of the space that
// will not be filled.
HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
clear_source_region(clear_beg, clear_end);
}
}
@@ -3051,19 +3083,34 @@ HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
}
void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
SpaceId src_space_id,
size_t beg_region,
HeapWord* end_addr)
{
ParallelCompactData& sd = summary_data();
#ifdef ASSERT
MutableSpace* const src_space = _space_info[src_space_id].space();
HeapWord* const beg_addr = sd.region_to_addr(beg_region);
assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
"src_space_id does not match beg_addr");
assert(src_space->contains(end_addr) || end_addr == src_space->end(),
"src_space_id does not match end_addr");
#endif // #ifdef ASSERT
RegionData* const beg = sd.region(beg_region);
HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
size_t cur_idx = beg_region;
for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
// Regions up to new_top() are enqueued if they become available.
HeapWord* const new_top = _space_info[src_space_id].new_top();
RegionData* const enqueue_end =
sd.addr_to_region_ptr(sd.region_align_up(new_top));
for (RegionData* cur = beg; cur < end; ++cur) {
assert(cur->data_size() > 0, "region must have live data");
cur->decrement_destination_count();
if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
cm->save_for_processing(cur_idx);
if (cur < enqueue_end && cur->available() && cur->claim()) {
cm->save_for_processing(sd.region(cur));
}
}
}
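
For reference, a simplified model of the enqueue rule introduced above (hypothetical types, not HotSpot code): a source region becomes fillable when its destination count reaches zero, and only regions below the region covering `new_top()` carry data to move, so only those are claimed and queued.

```cpp
#include <atomic>
#include <cstddef>
#include <vector>

// Hypothetical simplified stand-in for RegionData.
struct Region {
  std::atomic<int>  destination_count{0};
  std::atomic<bool> claimed{false};
  bool available() const { return destination_count.load() == 0; }
  bool claim() {                        // succeeds for exactly one thread
    bool expected = false;
    return claimed.compare_exchange_strong(expected, true);
  }
};

// Decrement counts for regions [beg, end); enqueue any region below
// enqueue_end (the region containing new_top()) that becomes available.
void decrement_destination_counts(std::vector<Region>& regions,
                                  size_t beg, size_t end, size_t enqueue_end,
                                  std::vector<size_t>& work_queue) {
  for (size_t i = beg; i < end; ++i) {
    regions[i].destination_count.fetch_sub(1);
    if (i < enqueue_end && regions[i].available() && regions[i].claim()) {
      work_queue.push_back(i);          // models cm->save_for_processing()
    }
  }
}
```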
@@ -3178,7 +3225,8 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
HeapWord* const old_src_addr = closure.source();
closure.copy_partial_obj();
if (closure.is_full()) {
decrement_destination_counts(cm, src_region_idx, closure.source());
decrement_destination_counts(cm, src_space_id, src_region_idx,
closure.source());
region_ptr->set_deferred_obj_addr(NULL);
region_ptr->set_completed();
return;
@@ -3187,7 +3235,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
HeapWord* const end_addr = sd.region_align_down(closure.source());
if (sd.region_align_down(old_src_addr) != end_addr) {
// The partial object was copied from more than one source region.
decrement_destination_counts(cm, src_region_idx, end_addr);
decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
// Move to the next source region, possibly switching spaces as well. All
// args except end_addr may be modified.
@@ -3227,19 +3275,21 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
region_ptr->set_deferred_obj_addr(closure.destination());
status = closure.copy_until_full(); // copies from closure.source()
decrement_destination_counts(cm, src_region_idx, closure.source());
decrement_destination_counts(cm, src_space_id, src_region_idx,
closure.source());
region_ptr->set_completed();
return;
}
if (status == ParMarkBitMap::full) {
decrement_destination_counts(cm, src_region_idx, closure.source());
decrement_destination_counts(cm, src_space_id, src_region_idx,
closure.source());
region_ptr->set_deferred_obj_addr(NULL);
region_ptr->set_completed();
return;
}
decrement_destination_counts(cm, src_region_idx, end_addr);
decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
// Move to the next source region, possibly switching spaces as well. All
// args except end_addr may be modified.
@@ -3318,7 +3368,7 @@ void PSParallelCompact::reset_millis_since_last_gc() {
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
if (source() != destination()) {
assert(source() > destination(), "must copy to the left");
DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
Copy::aligned_conjoint_words(source(), destination(), words_remaining());
}
update_state(words_remaining());
@@ -3339,7 +3389,7 @@ void MoveAndUpdateClosure::copy_partial_obj()
// This test is necessary; if omitted, the pointer updates to a partial object
// that crosses the dense prefix boundary could be overwritten.
if (source() != destination()) {
assert(source() > destination(), "must copy to the left");
DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
Copy::aligned_conjoint_words(source(), destination(), words);
}
update_state(words);
@@ -3364,7 +3414,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
}
if (destination() != source()) {
assert(destination() < source(), "must copy to the left");
DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
Copy::aligned_conjoint_words(source(), destination(), words);
}
......
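
Note on the three assertion changes above: previously an object could only move to a lower address within its own space, so `source() > destination()` held at every copy site. Once young gen spaces can be split, live words may instead be copied into a different space at a higher address, so the copy sites now defer to `check_new_location()`, whose assert (see the header diff below) is exactly "must move left or to a different space".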
@@ -978,6 +978,10 @@ class PSParallelCompact : AllStatic {
// Include the new objects in the summary data.
static void summarize_new_objects(SpaceId id, HeapWord* start);
// Add live objects to a survivor space since it's rare that both survivors
// are non-empty.
static void provoke_split_fill_survivor(SpaceId id);
// Add live objects and/or choose the dense prefix to provoke splitting.
static void provoke_split(bool & maximum_compaction);
#endif
@@ -1154,8 +1158,10 @@ class PSParallelCompact : AllStatic {
HeapWord* end_addr);
// Decrement the destination count for each non-empty source region in the
// range [beg_region, region(region_align_up(end_addr))).
// range [beg_region, region(region_align_up(end_addr))). If the destination
// count for a region goes to 0 and it needs to be filled, enqueue it.
static void decrement_destination_counts(ParCompactionManager* cm,
SpaceId src_space_id,
size_t beg_region,
HeapWord* end_addr);
@@ -1230,6 +1236,8 @@ class PSParallelCompact : AllStatic {
#endif // #ifndef PRODUCT
#ifdef ASSERT
// Sanity check the new location of a word in the heap.
static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
// Verify that all the regions have been emptied.
static void verify_complete(SpaceId space_id);
#endif // #ifdef ASSERT
@@ -1397,6 +1405,15 @@ inline void PSParallelCompact::adjust_pointer(T* p,
}
}
#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
"must move left or to a different space");
}
#endif // ASSERT
class MoveAndUpdateClosure: public ParMarkBitMapClosure {
public:
inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
......
@@ -178,8 +178,7 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words)
// Set the length first for concurrent GC.
((arrayOop)start)->set_length((int)len);
post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
words);
post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
DEBUG_ONLY(zap_filler_array(start, words);)
}
......
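
The `fill_with_array` change above formats filler space as an ordinary `int[]` (`Universe::intArrayKlassObj()`) instead of a dedicated filler-array klass, which the `Universe` hunks below then delete. A hedged sketch of the length arithmetic such a filler implies (assumed constants and a hypothetical helper; the real header size comes from `arrayOopDesc`):

```cpp
#include <cassert>
#include <cstddef>

const size_t HeapWordSize = sizeof(void*);  // 8 on LP64
const size_t jintSize     = 4;

// Hypothetical helper: a filler occupying `words` heap words becomes an
// int[] whose element count covers everything after the array header, so
// heap walkers can parse and skip it like any other object.
size_t filler_array_length(size_t words, size_t header_words) {
  assert(words >= header_words && "filler must hold at least a header");
  return (words - header_words) * HeapWordSize / jintSize;
}
```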
@@ -49,7 +49,6 @@ klassOop Universe::_constantPoolKlassObj = NULL;
klassOop Universe::_constantPoolCacheKlassObj = NULL;
klassOop Universe::_compiledICHolderKlassObj = NULL;
klassOop Universe::_systemObjArrayKlassObj = NULL;
klassOop Universe::_fillerArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
@@ -127,7 +126,6 @@ void Universe::system_classes_do(void f(klassOop)) {
f(instanceKlassKlassObj());
f(constantPoolKlassObj());
f(systemObjArrayKlassObj());
f(fillerArrayKlassObj());
}
void Universe::oops_do(OopClosure* f, bool do_all) {
@@ -182,7 +180,6 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_constantPoolCacheKlassObj);
f->do_oop((oop*)&_compiledICHolderKlassObj);
f->do_oop((oop*)&_systemObjArrayKlassObj);
f->do_oop((oop*)&_fillerArrayKlassObj);
f->do_oop((oop*)&_the_empty_byte_array);
f->do_oop((oop*)&_the_empty_short_array);
f->do_oop((oop*)&_the_empty_int_array);
@@ -268,7 +265,6 @@ void Universe::genesis(TRAPS) {
_compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
_systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
_fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
_the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
_the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
......
@@ -133,7 +133,6 @@ class Universe: AllStatic {
static klassOop _constantPoolCacheKlassObj;
static klassOop _compiledICHolderKlassObj;
static klassOop _systemObjArrayKlassObj;
static klassOop _fillerArrayKlassObj;
// Known objects in the VM
@@ -266,7 +265,6 @@ class Universe: AllStatic {
static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
static klassOop compiledICHolderKlassObj() { return _compiledICHolderKlassObj; }
static klassOop systemObjArrayKlassObj() { return _systemObjArrayKlassObj; }
static klassOop fillerArrayKlassObj() { return _fillerArrayKlassObj; }
// Known objects in the VM
static oop int_mirror() { return check_mirror(_int_mirror); }
......