Commit d2f0b980 authored by M minqi

Merge
@@ -578,16 +578,16 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
-      // The expansion succeeded and so we should have at least one
-      // region on the free list.
-      res = _free_list.remove_head();
+      // Even though the heap was expanded, it might not have reached
+      // the desired size. So, we cannot assume that the allocation
+      // will succeed.
+      res = _free_list.remove_head_or_null();
     }
   }
   if (res != NULL) {
     if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
-                             "top "PTR_FORMAT, res->hrs_index(),
-                             res->bottom(), res->end(), res->top());
+      gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
+                             HR_FORMAT_PARAMS(res));
     }
   }
   return res;
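The switch above from remove_head() to remove_head_or_null() is the behavioral point of this hunk: after a partial expansion the free list may legitimately still be empty, so the caller needs NULL back rather than an assertion failure. A minimal standalone sketch of the two contracts (invented types, not HotSpot code):

```cpp
#include <cassert>
#include <cstddef>

struct Region { Region* next = nullptr; };

struct FreeRegionList {
  Region* head = nullptr;

  // Pre-condition: the list is non-empty; trips an assert otherwise.
  Region* remove_head() {
    assert(head != nullptr && "remove_head() on an empty list");
    Region* res = head;
    head = res->next;
    return res;
  }

  // Tolerates an empty list, which is exactly what the caller above
  // needs after expand() succeeded only partially.
  Region* remove_head_or_null() {
    if (head == nullptr) return nullptr;
    return remove_head();
  }
};
```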
@@ -608,12 +608,12 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
   return alloc_region;
 }
 
-int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
-                                                       size_t word_size) {
+size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                          size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
-  int first = -1;
+  size_t first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expasion if this fails, so
@@ -622,7 +622,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     if (hr != NULL) {
       first = hr->hrs_index();
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
     }
   } else {
     // We can't allocate humongous regions while cleanupComplete() is
@@ -637,10 +637,10 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     append_secondary_free_list_if_not_empty_with_lock();
 
     if (free_regions() >= num_regions) {
-      first = _hrs->find_contiguous(num_regions);
-      if (first != -1) {
-        for (int i = first; i < first + (int) num_regions; ++i) {
-          HeapRegion* hr = _hrs->at(i);
+      first = _hrs.find_contiguous(num_regions);
+      if (first != G1_NULL_HRS_INDEX) {
+        for (size_t i = first; i < first + num_regions; ++i) {
+          HeapRegion* hr = region_at(i);
           assert(hr->is_empty(), "sanity");
           assert(is_on_master_free_list(hr), "sanity");
           hr->set_pending_removal(true);
@@ -653,15 +653,15 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
 }
 
 HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
                                                            size_t num_regions,
                                                            size_t word_size) {
-  assert(first != -1, "pre-condition");
+  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
-  int last = first + (int) num_regions;
+  size_t last = first + num_regions;
 
   // We need to initialize the region(s) we just discovered. This is
   // a bit tricky given that it can happen concurrently with
@@ -676,7 +676,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   assert(word_size <= word_size_sum, "sanity");
 
   // This will be the "starts humongous" region.
-  HeapRegion* first_hr = _hrs->at(first);
+  HeapRegion* first_hr = region_at(first);
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
@@ -711,8 +711,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
@@ -746,8 +746,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   // last one) is actually used when we will free up the humongous
   // region in free_humongous_region().
   hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
       assert(hr->bottom() < new_top && new_top <= hr->end(),
@@ -783,9 +783,9 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   size_t x_size = expansion_regions();
-  size_t fs = _hrs->free_suffix();
-  int first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == -1) {
+  size_t fs = _hrs.free_suffix();
+  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == G1_NULL_HRS_INDEX) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       // If the number of regions we're trying to allocate for this
@@ -799,16 +799,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
       assert(num_regions > fs, "earlier allocation should have succeeded");
 
       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        // Even though the heap was expanded, it might not have
+        // reached the desired size. So, we cannot assume that the
+        // allocation will succeed.
         first = humongous_obj_allocate_find_first(num_regions, word_size);
-        // If the expansion was successful then the allocation
-        // should have been successful.
-        assert(first != -1, "this should have worked");
       }
     }
   }
 
   HeapWord* result = NULL;
-  if (first != -1) {
+  if (first != G1_NULL_HRS_INDEX) {
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
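The hunks above replace the int region index and its -1 sentinel with size_t and G1_NULL_HRS_INDEX. A standalone sketch of the pattern, assuming G1_NULL_HRS_INDEX is defined as ((size_t) -1) elsewhere in the patch (that definition is not shown in this diff):

```cpp
#include <cstddef>
#include <cstdio>

// Assumed definition: the largest size_t doubles as the "no index" value,
// since it can never be a valid region index.
const size_t G1_NULL_HRS_INDEX = (size_t) -1;

size_t find_first_free(const bool* is_free, size_t len) {
  for (size_t i = 0; i < len; ++i) {
    if (is_free[i]) return i;
  }
  return G1_NULL_HRS_INDEX;  // "not found", tested by name at call sites
}

int main() {
  bool is_free[4] = { false, false, true, false };
  size_t first = find_first_free(is_free, 4);
  if (first != G1_NULL_HRS_INDEX) {
    std::printf("first free region: %zu\n", first);
  }
  return 0;
}
```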
@@ -1366,6 +1366,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   if (PrintHeapAtGC) {
@@ -1589,6 +1590,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
   if (expand(expand_bytes)) {
+    _hrs.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                  false /* expect_null_mutator_alloc_region */);
@@ -1596,6 +1598,19 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   return NULL;
 }
 
+void G1CollectedHeap::update_committed_space(HeapWord* old_end,
+                                             HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
+  // Update the committed mem region.
+  _g1_committed.set_end(new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+  // Tell the BOT about the update.
+  _bot_shared->resize(_g1_committed.word_size());
+}
+
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
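The new update_committed_space() helper centralizes bookkeeping that expand() and shrink_helper() previously duplicated: whenever the committed end moves, the card table and the block offset table (BOT) have to be resized to match, and one helper owning every side effect keeps the two paths from drifting apart. A simplified standalone sketch of the pattern (invented types, not HotSpot code):

```cpp
#include <cassert>

struct CoveredStructure {            // stands in for the card table / BOT
  const char* begin = nullptr;
  const char* end = nullptr;
  void resize(const char* b, const char* e) { begin = b; end = e; }
};

struct Heap {
  const char* committed_begin = nullptr;
  const char* committed_end = nullptr;
  CoveredStructure card_table;       // hypothetical dependents
  CoveredStructure bot;

  // Analogue of G1CollectedHeap::update_committed_space(): called after
  // the underlying storage has already grown or shrunk to new_end, from
  // both the expand and the shrink path.
  void update_committed_space(const char* old_end, const char* new_end) {
    assert(old_end != new_end && "don't call this otherwise");
    committed_end = new_end;
    card_table.resize(committed_begin, new_end);
    bot.resize(committed_begin, new_end);
  }
};
```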
@@ -1607,47 +1622,37 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
                            old_mem_size/K, aligned_expand_bytes/K);
   }
 
-  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  // First commit the memory.
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
   if (successful) {
-    HeapWord* new_end = (HeapWord*)_g1_storage.high();
-
-    // Expand the committed region.
-    _g1_committed.set_end(new_end);
-
-    // Tell the cardtable about the expansion.
-    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-    // And the offset table as well.
-    _bot_shared->resize(_g1_committed.word_size());
-
-    expand_bytes = aligned_expand_bytes;
-    HeapWord* base = old_end;
-
-    // Create the heap regions for [old_end, new_end)
-    while (expand_bytes > 0) {
-      HeapWord* high = base + HeapRegion::GrainWords;
-
-      // Create a new HeapRegion.
-      MemRegion mr(base, high);
-      bool is_zeroed = !_g1_max_committed.contains(base);
-      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
-
-      // Add it to the HeapRegionSeq.
-      _hrs->insert(hr);
-      _free_list.add_as_tail(hr);
-
-      // And we used up an expansion region to create it.
-      _expansion_regions--;
-
-      expand_bytes -= HeapRegion::GrainBytes;
-      base += HeapRegion::GrainWords;
-    }
-    assert(base == new_end, "sanity");
-
-    // Now update max_committed if necessary.
-    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
-
+    // Then propagate this update to the necessary data structures.
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    update_committed_space(old_end, new_end);
+
+    FreeRegionList expansion_list("Local Expansion List");
+    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
+    assert(mr.start() == old_end, "post-condition");
+    // mr might be a smaller region than what was requested if
+    // expand_by() was unable to allocate the HeapRegion instances
+    assert(mr.end() <= new_end, "post-condition");
+
+    size_t actual_expand_bytes = mr.byte_size();
+    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
+    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
+           "post-condition");
+    if (actual_expand_bytes < aligned_expand_bytes) {
+      // We could not expand _hrs to the desired size. In this case we
+      // need to shrink the committed space accordingly.
+      assert(mr.end() < new_end, "invariant");
+
+      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
+      // First uncommit the memory.
+      _g1_storage.shrink_by(diff_bytes);
+      // Then propagate this update to the necessary data structures.
+      update_committed_space(new_end, mr.end());
+    }
+    _free_list.add_as_tail(&expansion_list);
  } else {
@@ -1667,37 +1672,31 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
   return successful;
 }
 
-void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
-{
+void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   size_t num_regions_deleted = 0;
-  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
-
-  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-  if (mr.byte_size() > 0)
+  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
+  assert(mr.end() == old_end, "post-condition");
+  if (mr.byte_size() > 0) {
     _g1_storage.shrink_by(mr.byte_size());
-  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-
-  _g1_committed.set_end(mr.start());
-  _expansion_regions += num_regions_deleted;
-
-  // Tell the cardtable about it.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-  // And the offset table as well.
-  _bot_shared->resize(_g1_committed.word_size());
-
-  HeapRegionRemSet::shrink_heap(n_regions());
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_shrink_bytes/K,
-                           new_mem_size/K);
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    assert(mr.start() == new_end, "post-condition");
+
+    _expansion_regions += num_regions_deleted;
+    update_committed_space(old_end, new_end);
+    HeapRegionRemSet::shrink_heap(n_regions());
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _g1_storage.committed_size();
+      gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
+                             old_mem_size/K, aligned_shrink_bytes/K,
+                             new_mem_size/K);
+    }
   }
 }
@@ -1712,6 +1711,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 }
@@ -1890,9 +1890,9 @@ jint G1CollectedHeap::initialize() {
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _g1_max_committed = _g1_committed;
-  _hrs = new HeapRegionSeq(_expansion_regions);
-  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
+  _hrs.initialize((HeapWord*) _g1_reserved.start(),
+                  (HeapWord*) _g1_reserved.end(),
+                  _expansion_regions);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -1991,8 +1991,9 @@ jint G1CollectedHeap::initialize() {
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
-  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
-  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
+
+  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
+                                             _g1_reserved.start());
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2100,7 +2101,7 @@ public:
 
 size_t G1CollectedHeap::recalculate_used() const {
   SumUsedClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
@@ -2120,7 +2121,7 @@ public:
 
 size_t G1CollectedHeap::recalculate_used_regions() const {
   SumUsedRegionsClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
 #endif // PRODUCT
@@ -2285,8 +2286,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
-    HeapRegion* hr = _hrs->addr_to_region(p);
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
+  if (hr != NULL) {
     return hr->is_in(p);
   } else {
     return _perm_gen->as_gen()->is_in(p);
@@ -2314,7 +2315,7 @@ public:
 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2322,7 +2323,7 @@ void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(mr, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2344,7 +2345,7 @@ public:
 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
   IterateObjectClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->object_iterate(cl);
   }
@@ -2369,25 +2370,18 @@ public:
 
 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
   SpaceClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }
 
-void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
-  _hrs->iterate(cl);
+void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
+  _hrs.iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
-                                               HeapRegionClosure* cl) {
-  _hrs->iterate_from(r, cl);
+                                               HeapRegionClosure* cl) const {
+  _hrs.iterate_from(r, cl);
 }
 
-void
-G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
-  _hrs->iterate_from(idx, cl);
-}
-
-HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
-
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  int worker,
@@ -2568,7 +2562,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
 }
 
 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return _hrs->length() > 0 ? _hrs->at(0) : NULL;
+  return n_regions() > 0 ? region_at(0) : NULL;
 }
@@ -2881,7 +2875,7 @@ void G1CollectedHeap::verify(bool allow_dirty,
            "sanity check");
   } else {
     VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
-    _hrs->iterate(&blk);
+    heap_region_iterate(&blk);
     if (blk.failures()) {
       failures = true;
     }
@@ -2950,7 +2944,7 @@ void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
 void G1CollectedHeap::print_on_extended(outputStream* st) const {
   PrintRegionClosure blk(st);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }
 
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
@@ -2989,15 +2983,6 @@ void G1CollectedHeap::print_tracing_info() const {
   SpecializationStats::print();
 }
 
-int G1CollectedHeap::addr_to_arena_id(void* addr) const {
-  HeapRegion* hr = heap_region_containing(addr);
-  if (hr == NULL) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
-
 G1CollectedHeap* G1CollectedHeap::heap() {
   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
          "not a garbage-first heap");
@@ -3477,6 +3462,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     }
   }
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@@ -3609,8 +3595,8 @@ class FindGCAllocRegion: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_gc_alloc_region()) {
-      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
-                             r->hrs_index(), r->bottom());
+      gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
+                             HR_FORMAT_PARAMS(r));
     }
     return false;
   }
@@ -3695,9 +3681,8 @@ void G1CollectedHeap::get_gc_alloc_regions() {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
 
      if (G1PrintHeapRegions) {
-        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                               "top "PTR_FORMAT,
-                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
+        gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
+                               HR_FORMAT_PARAMS(alloc_region));
      }
    }
@@ -4908,10 +4893,10 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);
 
-  int i = hr->hrs_index() + 1;
+  size_t i = hr->hrs_index() + 1;
   size_t num = 1;
-  while ((size_t) i < n_regions()) {
-    HeapRegion* curr_hr = _hrs->at(i);
+  while (i < n_regions()) {
+    HeapRegion* curr_hr = region_at(i);
     if (!curr_hr->continuesHumongous()) {
       break;
     }
@@ -5271,16 +5256,6 @@ void G1CollectedHeap::wait_while_free_regions_coming() {
   }
 }
 
-size_t G1CollectedHeap::n_regions() {
-  return _hrs->length();
-}
-
-size_t G1CollectedHeap::max_regions() {
-  return
-    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
-    HeapRegion::GrainBytes;
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
          "the heap lock should already be held by or for this thread");
@@ -5477,6 +5452,15 @@ public:
   }
 };
 
+HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+                                             HeapWord* bottom) {
+  HeapWord* end = bottom + HeapRegion::GrainWords;
+  MemRegion mr(bottom, end);
+  assert(_g1_reserved.contains(mr), "invariant");
+  // This might return NULL if the allocation fails
+  return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
+}
+
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
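new_heap_region() is the factory method declared in the header below; it gives the dummy-region path in initialize() and the region-table growth path one shared way to build a HeapRegion from an index and a bottom address. A standalone sketch of the idea (invented types and sizes, not HotSpot code):

```cpp
#include <cassert>
#include <cstddef>
#include <new>

const size_t kGrainWords = 512;      // hypothetical region size in words

struct Region {
  size_t index;
  const size_t* bottom;
  const size_t* end;
};

struct Heap {
  const size_t* reserved_begin;
  const size_t* reserved_end;

  // Analogue of G1CollectedHeap::new_heap_region(): one place derives the
  // region's [bottom, bottom + kGrainWords) extent and records its index.
  // May return NULL on allocation failure, which callers must tolerate.
  Region* new_heap_region(size_t index, const size_t* bottom) {
    const size_t* end = bottom + kGrainWords;
    assert(reserved_begin <= bottom && end <= reserved_end && "invariant");
    return new (std::nothrow) Region{index, bottom, end};
  }
};

int main() {
  static size_t backing[2 * kGrainWords];
  Heap heap = { backing, backing + 2 * kGrainWords };
  Region* r1 = heap.new_heap_region(1, backing + kGrainWords);
  delete r1;
  return 0;
}
```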
......
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@@ -42,7 +43,6 @@
 // heap subsets that will yield large amounts of garbage.
 
 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;
@@ -196,9 +196,6 @@ private:
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;
 
-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList _free_list;
@@ -222,7 +219,7 @@ private:
   void rebuild_region_lists();
 
   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;
 
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
@@ -421,13 +418,15 @@ protected:
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);
@@ -587,8 +586,8 @@ public:
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -754,6 +753,11 @@ protected:
                                    HumongousRegionSet* humongous_proxy_set,
                                    bool par);
 
+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -816,7 +820,6 @@ protected:
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
-
   // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();
   // We're done with GC alloc regions. We are going to tear down the
@@ -967,15 +970,13 @@ public:
   }
 
   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }
 
-  // The number of regions that are completely free.
-  size_t max_regions();
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  size_t free_regions() {
-    return _free_list.length();
-  }
+  size_t free_regions() { return _free_list.length(); }
 
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
@@ -983,6 +984,10 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
@@ -1144,17 +1149,15 @@ public:
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
 
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
-
-  HeapRegion* region_at(size_t idx);
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1195,12 +1198,14 @@ public:
   // A G1CollectedHeap will contain some number of heap regions. This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;
 
   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return an continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
return true; return true;
} }
bool is_in_young(oop obj) { bool is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj); HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young(); return hr != NULL && hr->is_young();
} }
...@@ -1368,11 +1373,6 @@ public: ...@@ -1368,11 +1373,6 @@ public:
// Override // Override
void print_tracing_info() const; void print_tracing_info() const;
// If "addr" is a pointer into the (reserved?) heap, returns a positive
// number indicating the "arena" within the heap in which "addr" falls.
// Or else returns 0.
virtual int addr_to_arena_id(void* addr) const;
// Convenience function to be used in situations where the heap type can be // Convenience function to be used in situations where the heap type can be
// asserted to be this type. // asserted to be this type.
static G1CollectedHeap* heap(); static G1CollectedHeap* heap();
......
@@ -34,9 +34,10 @@
 
 // Inline functions for G1CollectedHeap
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();
@@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
   return hr;
 }
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }
 
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
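The addr_to_region_unsafe() call above is backed by the biased region array set up in heapRegionSeq.cpp at the end of this diff: the array base is biased once by (heap bottom >> region shift), so an in-heap address maps to its region with one shift and one load, with no per-lookup subtraction of the heap bottom. A standalone sketch of the trick (invented constants, not HotSpot code; like the VM, it relies on out-of-range pointer arithmetic that is formally undefined but works on flat address spaces):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const int kShift = 4;                       // hypothetical log2(region bytes)
  const uintptr_t heap_bottom = 0x1000;

  int regions[8] = {0, 1, 2, 3, 4, 5, 6, 7};  // stands in for HeapRegion*[]

  // Biased base: regions - (heap_bottom >> kShift), computed once at setup.
  int* regions_biased = regions - (heap_bottom >> kShift);

  // "Unsafe" lookup: the caller guarantees addr is inside the heap, so no
  // bounds check and no NULL path are needed.
  uintptr_t addr = heap_bottom + 3 * (1 << kShift) + 5;
  int region = regions_biased[addr >> kShift];
  assert(region == 3);
  return 0;
}
```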
......
@@ -2639,11 +2639,8 @@ add_to_collection_set(HeapRegion* hr) {
   assert(!hr->is_young(), "non-incremental add of young region");
 
   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
+    gclog_or_tty->print_cr("added region to cset "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }
 
   if (_g1->mark_in_progress())
@@ -2813,11 +2810,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   _inc_cset_tail = hr;
 
   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }
 }
@@ -2838,11 +2832,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   _inc_cset_head = hr;
 
   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }
 }
......
@@ -159,20 +159,16 @@ public:
       gclog_or_tty->print_cr("----------");
     }
     gclog_or_tty->print_cr("Missing rem set entry:");
-    gclog_or_tty->print_cr("Field "PTR_FORMAT
-                           " of obj "PTR_FORMAT
-                           ", in region %d ["PTR_FORMAT
-                           ", "PTR_FORMAT"),",
-                           p, (void*) _containing_obj,
-                           from->hrs_index(),
-                           from->bottom(),
-                           from->end());
+    gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                           "of obj "PTR_FORMAT", "
+                           "in region "HR_FORMAT,
+                           p, (void*) _containing_obj,
+                           HR_FORMAT_PARAMS(from));
     _containing_obj->print_on(gclog_or_tty);
-    gclog_or_tty->print_cr("points to obj "PTR_FORMAT
-                           " in region %d ["PTR_FORMAT
-                           ", "PTR_FORMAT").",
-                           (void*) obj, to->hrs_index(),
-                           to->bottom(), to->end());
+    gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                           "in region "HR_FORMAT,
+                           (void*) obj,
+                           HR_FORMAT_PARAMS(to));
     obj->print_on(gclog_or_tty);
     gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                            cv_obj, cv_field);
@@ -484,11 +480,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 
 HeapRegion::
-HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
            MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind),
-    _hrs_index(-1),
+    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _next_in_special_set(NULL), _orig_end(NULL),
@@ -52,9 +52,11 @@ class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;
 
-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_)                                                \
+                (_hr_)->hrs_index(),                                          \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
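The reworked HR_FORMAT / HR_FORMAT_PARAMS pair above must be kept in sync: the format string consumes exactly the arguments the parameter macro expands to. A compilable demonstration of the same idea, with HotSpot's SIZE_FORMAT and PTR_FORMAT replaced by %zu and %p for portability:

```cpp
#include <cstddef>
#include <cstdio>

struct HeapRegion {
  size_t index;
  void* bottom_; void* top_; void* end_;
  bool survivor, young;
  size_t hrs_index() const { return index; }
  bool is_survivor() const { return survivor; }
  bool is_young() const { return young; }
  void* bottom() const { return bottom_; }
  void* top() const { return top_; }
  void* end() const { return end_; }
};

// Same shape as the patch: index:(type)[bottom,top,end], where the type
// tag is S (survivor), E (eden/young), or - (neither).
#define HR_FORMAT "%zu:(%s)[%p,%p,%p]"
#define HR_FORMAT_PARAMS(_hr_)                                          \
  (_hr_)->hrs_index(),                                                  \
  (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-",         \
  (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()

int main() {
  char buf[64];
  HeapRegion hr = {7, buf, buf + 32, buf + 64, false, true};
  std::printf("new alloc region " HR_FORMAT "\n", HR_FORMAT_PARAMS(&hr));
  return 0;
}
```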
@@ -237,9 +239,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
  protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t _hrs_index;
 
   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.
@@ -296,8 +297,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };
 
   volatile YoungType _young_type;
@@ -351,7 +351,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
   static int LogOfHRGrainBytes;
@@ -393,8 +394,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes() { return _prev_marked_bytes; }
@@ -579,6 +579,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();
......
@@ -834,7 +834,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
 #endif
 
   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;
@@ -860,7 +860,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminated garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
+                           hr()->hrs_index());
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)
@@ -878,7 +879,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("  For other region %d:", cur->hr()->hrs_index());
+        gclog_or_tty->print_cr("  For other region "SIZE_FORMAT":",
+                               cur->hr()->hrs_index());
       if (!region_bm->at(cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);
@@ -994,7 +996,7 @@ void OtherRegionsTable::clear() {
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t)from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");
@@ -1002,7 +1004,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
......
...@@ -23,259 +23,182 @@ ...@@ -23,259 +23,182 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
// Local to this file. // Private
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
else if (*hr1p == *hr2p) return 0;
else {
assert(false, "We should never compare distinct overlapping regions.");
}
return 0;
}
HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
_alloc_search_start(0),
// The line below is the worst bit of C++ hackery I've ever written
// (Detlefs, 11/23). You should think of it as equivalent to
// "_regions(100, true)": initialize the growable array and inform it
// that it should allocate its elem array(s) on the C heap.
//
// The first argument, however, is actually a comma expression
// (set_allocation_type(this, C_HEAP), 100). The purpose of the
// set_allocation_type() call is to replace the default allocation
// type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
// allow to pass the assert in GenericGrowableArray() which checks
// that a growable array object must be on C heap if elements are.
//
// Note: containing object is allocated on C heap since it is CHeapObj.
//
_regions((ResourceObj::set_allocation_type((address)&_regions,
ResourceObj::C_HEAP),
(int)max_size),
true),
_next_rr_candidate(0),
_seq_bottom(NULL)
{}
// Private methods.
void HeapRegionSeq::print_empty_runs() {
int empty_run = 0;
int n_empty = 0;
int empty_run_start;
for (int i = 0; i < _regions.length(); i++) {
HeapRegion* r = _regions.at(i);
if (r->continuesHumongous()) continue;
if (r->is_empty()) {
assert(!r->isHumongous(), "H regions should not be empty.");
if (empty_run == 0) empty_run_start = i;
empty_run++;
n_empty++;
} else {
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
empty_run = 0;
}
}
}
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
}
gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
int HeapRegionSeq::find(HeapRegion* hr) {
// FIXME: optimized for adjacent regions of fixed size.
int ind = hr->hrs_index();
if (ind != -1) {
assert(_regions.at(ind) == hr, "Mismatch");
}
return ind;
}
size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
size_t len = length();
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len,
err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
from, len));
// Public methods. size_t curr = from;
size_t first = G1_NULL_HRS_INDEX;
void HeapRegionSeq::insert(HeapRegion* hr) {
assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
if (_regions.length() == 0
|| _regions.top()->end() <= hr->bottom()) {
hr->set_hrs_index(_regions.length());
_regions.append(hr);
} else {
_regions.append(hr);
_regions.sort(orderRegions);
for (int i = 0; i < _regions.length(); i++) {
_regions.at(i)->set_hrs_index(i);
}
}
char* bot = (char*)_regions.at(0)->bottom();
if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}
size_t HeapRegionSeq::length() {
return _regions.length();
}
size_t HeapRegionSeq::free_suffix() {
size_t res = 0;
int first = _regions.length() - 1;
int cur = first;
while (cur >= 0 &&
(_regions.at(cur)->is_empty()
&& (first == cur
|| (_regions.at(cur+1)->bottom() ==
_regions.at(cur)->end())))) {
res++;
cur--;
}
return res;
}
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
assert(num > 1, "pre-condition");
assert(0 <= from && from <= _regions.length(),
err_msg("from: %d should be valid and <= than %d",
from, _regions.length()));
int curr = from;
int first = -1;
  size_t num_so_far = 0;
  while (curr < _regions.length() && num_so_far < num) {
    HeapRegion* curr_hr = _regions.at(curr);
    if (curr_hr->is_empty()) {
      if (first == -1) {
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      first = -1;
      num_so_far = 0;
    }
    curr += 1;
  }
  assert(num_so_far <= num, "post-condition");
  if (num_so_far == num) {
    // we found enough space for the humongous object
    assert(from <= first && first < _regions.length(), "post-condition");
    assert(first < curr && (curr - first) == (int) num, "post-condition");
    for (int i = first; i < first + (int) num; ++i) {
      assert(_regions.at(i)->is_empty(), "post-condition");
    }
    return first;
  } else {
    // we failed to find enough space for the humongous object
    return -1;
  }
}
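Both the old and the new find_contiguous_from() implement the same linear scan, resetting whenever a non-empty region breaks the run. A self-contained model of the algorithm (toy types and a toy sentinel, not the HotSpot code):

#include <cstddef>
#include <cassert>

static const size_t NULL_INDEX = (size_t) -1;

// Returns the index of the first slot of a run of `num` consecutive
// empty slots in [from, len), or NULL_INDEX if there is none --
// the same contract as find_contiguous_from() above.
static size_t find_contiguous_from(const bool* is_empty, size_t len,
                                   size_t from, size_t num) {
  size_t first = NULL_INDEX;
  size_t num_so_far = 0;
  for (size_t curr = from; curr < len && num_so_far < num; curr += 1) {
    if (is_empty[curr]) {
      if (first == NULL_INDEX) first = curr;
      num_so_far += 1;
    } else {
      first = NULL_INDEX;   // run broken: start over
      num_so_far = 0;
    }
  }
  return (num_so_far == num) ? first : NULL_INDEX;
}

int main() {
  bool heap[] = { true, false, true, true, true, false };
  assert(find_contiguous_from(heap, 6, 0, 3) == 2);          // run at 2..4
  assert(find_contiguous_from(heap, 6, 3, 3) == NULL_INDEX); // too short
  return 0;
}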
// ===== Removed (old implementation, continued) =====

int HeapRegionSeq::find_contiguous(size_t num) {
  assert(num > 1, "otherwise we should not be calling this");
  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid and <= %d",
                 _alloc_search_start, _regions.length()));

  int start = _alloc_search_start;
  int res = find_contiguous_from(start, num);
  if (res == -1 && start != 0) {
    // Try starting from the beginning. If _alloc_search_start was 0,
    // no point in doing this again.
    res = find_contiguous_from(0, num);
  }
  if (res != -1) {
    assert(0 <= res && res < _regions.length(),
           err_msg("res: %d should be valid", res));
    _alloc_search_start = res + (int) num;
    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
           err_msg("_alloc_search_start: %d should be valid",
                   _alloc_search_start));
  }
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence. In all
// other cases a linear search is required to find the index of r.
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

// ===== Added (new fixed-array implementation) =====

size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
  size_t len = length();
  assert(num > 1, "use this only for sequences of length 2 or greater");
  assert(from <= len,
         err_msg("from: "SIZE_FORMAT" should be valid and <= "SIZE_FORMAT,
                 from, len));

  size_t curr = from;
  size_t first = G1_NULL_HRS_INDEX;
  size_t num_so_far = 0;
  while (curr < len && num_so_far < num) {
    if (at(curr)->is_empty()) {
      if (first == G1_NULL_HRS_INDEX) {
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      first = G1_NULL_HRS_INDEX;
      num_so_far = 0;
    }
    curr += 1;
  }
  assert(num_so_far <= num, "post-condition");
  if (num_so_far == num) {
    // we found enough space for the humongous object
    assert(from <= first && first < len, "post-condition");
    assert(first < curr && (curr - first) == num, "post-condition");
    for (size_t i = first; i < first + num; ++i) {
      assert(at(i)->is_empty(), "post-condition");
    }
    return first;
  } else {
    // we failed to find enough space for the humongous object
    return G1_NULL_HRS_INDEX;
  }
}

// Public

void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
                               size_t max_length) {
  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
         "bottom should be heap region aligned");
  assert((size_t) end % HeapRegion::GrainBytes == 0,
         "end should be heap region aligned");

  _length = 0;
  _heap_bottom = bottom;
  _heap_end = end;
  _region_shift = HeapRegion::LogOfHRGrainBytes;
  _next_search_index = 0;
  _allocated_length = 0;
  _max_length = max_length;

  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
  memset(_regions, 0, max_length * sizeof(HeapRegion*));
  _regions_biased = _regions - ((size_t) bottom >> _region_shift);

  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
         "bottom should be included in the region with index 0");
}

MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
                                   HeapWord* new_end,
                                   FreeRegionList* list) {
  assert(old_end < new_end, "don't call it otherwise");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  HeapWord* next_bottom = old_end;
  assert(_heap_bottom <= next_bottom, "invariant");
  while (next_bottom < new_end) {
    assert(next_bottom < _heap_end, "invariant");
    size_t index = length();

    assert(index < _max_length, "otherwise we cannot expand further");
    if (index == 0) {
      // We have not allocated any regions so far
      assert(next_bottom == _heap_bottom, "invariant");
    } else {
      // next_bottom should match the end of the last/previous region
      assert(next_bottom == at(index - 1)->end(), "invariant");
    }

    if (index == _allocated_length) {
      // We have to allocate a new HeapRegion.
      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
      if (new_hr == NULL) {
        // allocation failed, we bail out and return what we have done so far
        return MemRegion(old_end, next_bottom);
      }
      assert(_regions[index] == NULL, "invariant");
      _regions[index] = new_hr;
      increment_length(&_allocated_length);
    }

    // Have to increment the length first, otherwise we will get an
    // assert failure at(index) below.
    increment_length(&_length);
    HeapRegion* hr = at(index);
    list->add_as_tail(hr);

    next_bottom = hr->end();
  }
  assert(next_bottom == new_end, "post-condition");
  return MemRegion(old_end, next_bottom);
}

size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  size_t index = length();
  while (index > 0) {
    index -= 1;
    if (!at(index)->is_empty()) {
      break;
    }
    res += 1;
  }
  return res;
}

size_t HeapRegionSeq::find_contiguous(size_t num) {
  assert(num > 1, "use this only for sequences of length 2 or greater");
  assert(_next_search_index <= length(),
         err_msg("_next_search_index: "SIZE_FORMAT" "
                 "should be valid and <= "SIZE_FORMAT,
                 _next_search_index, length()));

  size_t start = _next_search_index;
  size_t res = find_contiguous_from(start, num);
  if (res == G1_NULL_HRS_INDEX && start > 0) {
    // Try starting from the beginning. If _next_search_index was 0,
    // no point in doing this again.
    res = find_contiguous_from(0, num);
  }
  if (res != G1_NULL_HRS_INDEX) {
    assert(res < length(),
           err_msg("res: "SIZE_FORMAT" should be valid", res));
    _next_search_index = res + num;
    assert(_next_search_index <= length(),
           err_msg("_next_search_index: "SIZE_FORMAT" "
                   "should be valid and <= "SIZE_FORMAT,
                   _next_search_index, length()));
  }
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
  iterate_from((HeapRegion*) NULL, blk);
}

void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
  size_t hr_index = 0;
  if (hr != NULL) {
    hr_index = (size_t) hr->hrs_index();
  }

  size_t len = length();
  for (size_t i = hr_index; i < len; i += 1) {
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
    }
  }
  for (size_t i = 0; i < hr_index; i += 1) {
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
    }
  }
}
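Old and new iterate_from() share the same wrap-around traversal: visit [start, length) and then [0, start), stopping as soon as the closure returns true. A minimal sketch of the pattern with a hypothetical closure type (not the HotSpot classes):

#include <cstdio>
#include <cstddef>

// Mimics HeapRegionClosure: doSlot() returns true to stop the iteration.
struct SlotClosure {
  virtual bool doSlot(size_t index) = 0;
  virtual ~SlotClosure() {}
};

// Visit [start, len) and then [0, start), stopping early if the
// closure asks for it -- the same shape as iterate_from() above.
static void iterate_from(size_t start, size_t len, SlotClosure* blk) {
  for (size_t i = start; i < len; i += 1) {
    if (blk->doSlot(i)) return;
  }
  for (size_t i = 0; i < start; i += 1) {
    if (blk->doSlot(i)) return;
  }
}

struct PrintClosure : public SlotClosure {
  virtual bool doSlot(size_t index) {
    printf(" %zu", index);
    return false;   // never terminate early
  }
};

int main() {
  PrintClosure cl;
  iterate_from(3, 6, &cl);   // prints: " 3 4 5 0 1 2"
  printf("\n");
  return 0;
}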
@@ -283,54 +206,92 @@

// ===== Removed (old implementation) =====

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  // Reset this in case it's currently pointing into the regions that
  // we just removed.
  _alloc_search_start = 0;

  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}

// ===== Added (new implementation) =====

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t* num_regions_deleted) {
  // Reset this in case it's currently pointing into the regions that
  // we just removed.
  _next_search_index = 0;

  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_length, "invariant");
  assert(_allocated_length > 0, "we should have at least one region committed");

  // around the loop, i will be the next region to be removed
  size_t i = length() - 1;
  assert(i > 0, "we should never remove all regions");
  // [last_start, end) is the MemRegion that covers the regions we will remove.
  HeapWord* end = at(i)->end();
  HeapWord* last_start = end;
  *num_regions_deleted = 0;
  while (shrink_bytes > 0) {
    HeapRegion* cur = at(i);
    // We should leave the humongous regions where they are.
    if (cur->isHumongous()) break;
    // We should stop shrinking if we come across a non-empty region.
    if (!cur->is_empty()) break;

    i -= 1;
    *num_regions_deleted += 1;
    shrink_bytes -= cur->capacity();
    last_start = cur->bottom();
    decrement_length(&_length);
    // We will reclaim the HeapRegion. _allocated_length should be
    // covering this index. So, even though we removed the region from
    // the active set by decreasing _length, we still have it
    // available in the future if we need to re-use it.
    assert(i > 0, "we should never remove all regions");
    assert(length() > 0, "we should never remove all regions");
  }
  return MemRegion(last_start, end);
}
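Note that the new shrink_by() only decrements _length; _allocated_length is left alone, so the HeapRegion metadata of uncommitted regions is retained and a later expand_by() can re-use it instead of allocating again. A toy sketch of this two-watermark scheme (all names hypothetical, not the G1 code):

#include <cassert>
#include <cstddef>

struct Region { size_t index; };

static const size_t MAX = 8;
static Region* regions[MAX];        // metadata slots, NULL until allocated
static size_t length = 0;           // committed regions
static size_t allocated_length = 0; // slots with metadata, always >= length

// Commit one more region, re-using retained metadata when present.
static void expand_one() {
  assert(length < MAX);
  if (length == allocated_length) {
    regions[length] = new Region();   // first commit: allocate metadata
    regions[length]->index = length;
    allocated_length += 1;
  }
  length += 1;                        // a re-commit re-uses regions[length]
}

// Uncommit the last region but keep its metadata for later re-use.
static void shrink_one() {
  assert(length > 0);
  length -= 1;
}

int main() {
  expand_one(); expand_one(); expand_one();  // length = allocated = 3
  shrink_one();                              // length = 2, allocated = 3
  Region* retained = regions[2];
  expand_one();                              // no new allocation happens
  assert(regions[2] == retained && length == 3 && allocated_length == 3);
  return 0;
}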
// ===== Removed (old implementation) =====

class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}

// ===== Added (new implementation) =====

#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
  guarantee(_length <= _allocated_length,
            err_msg("invariant: _length: "SIZE_FORMAT" "
                    "_allocated_length: "SIZE_FORMAT,
                    _length, _allocated_length));
  guarantee(_allocated_length <= _max_length,
            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
                    "_max_length: "SIZE_FORMAT,
                    _allocated_length, _max_length));
  guarantee(_next_search_index <= _length,
            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
                    "_length: "SIZE_FORMAT,
                    _next_search_index, _length));

  HeapWord* prev_end = _heap_bottom;
  for (size_t i = 0; i < _allocated_length; i += 1) {
    HeapRegion* hr = _regions[i];
    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
    guarantee(hr->bottom() == prev_end,
              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
                      "prev_end: "PTR_FORMAT,
                      i, HR_FORMAT_PARAMS(hr), prev_end));
    guarantee(hr->hrs_index() == i,
              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
                      i, hr->hrs_index()));
    if (i < _length) {
      // Asserts will fire if i is >= _length
      HeapWord* addr = hr->bottom();
      guarantee(addr_to_region(addr) == hr, "sanity");
      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
    } else {
      guarantee(hr->is_empty(), "sanity");
      guarantee(!hr->isHumongous(), "sanity");
      // using assert instead of guarantee here since containing_set()
      // is only available in non-product builds.
      assert(hr->containing_set() == NULL, "sanity");
    }
    if (hr->startsHumongous()) {
      prev_end = hr->orig_end();
    } else {
      prev_end = hr->end();
    }
  }
  for (size_t i = _allocated_length; i < _max_length; i += 1) {
    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
  }
}
#endif // PRODUCT
@@ -25,92 +25,143 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP

// ===== Removed (old version) =====

#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"

class HeapRegion;
class HeapRegionClosure;

class HeapRegionSeq: public CHeapObj {
  // _regions is kept sorted by start address order, and no two regions are
  // overlapping.
  GrowableArray<HeapRegion*> _regions;

  // The index in "_regions" at which to start the next allocation search.
  // (For efficiency only; private to obj_allocate after initialization.)
  int _alloc_search_start;

  // Finds a contiguous set of empty regions of length num, starting
  // from a given index.
  int find_contiguous_from(int from, size_t num);

  // Currently, we're choosing collection sets in a round-robin fashion,
  // starting here.
  int _next_rr_candidate;

  // The bottom address of the bottom-most region, or else NULL if there
  // are no regions in the sequence.
  char* _seq_bottom;

public:
  // Initializes "this" to the empty sequence of regions.
  HeapRegionSeq(const size_t max_size);

  // Adds "hr" to "this" sequence. Requires "hr" not to overlap with
  // any region already in "this". (Will perform better if regions are
  // inserted in ascending address order.)
  void insert(HeapRegion* hr);

  // Given a HeapRegion*, returns its index within _regions,
  // or returns -1 if not found.
  int find(HeapRegion* hr);

  // Requires the index to be valid, and return the region at the index.
  HeapRegion* at(size_t i) { return _regions.at((int)i); }

  // Return the number of regions in the sequence.
  size_t length();

  // Returns the number of contiguous regions at the end of the sequence
  // that are available for allocation.
  size_t free_suffix();

  // Find a contiguous set of empty regions of length num and return
  // the index of the first region or -1 if the search was unsuccessful.
  int find_contiguous(size_t num);

  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
  // in address order, terminating the iteration early
  // if the "doHeapRegion" method returns "true".
  void iterate(HeapRegionClosure* blk);

  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
  // starting at "r" (or first region, if "r" is NULL), in a circular
  // manner, terminating the iteration early if the "doHeapRegion" method
  // returns "true".
  void iterate_from(HeapRegion* r, HeapRegionClosure* blk);

  // As above, but start from a given index in the sequence
  // instead of a given heap region.
  void iterate_from(int idx, HeapRegionClosure* blk);

  // Requires "shrink_bytes" to be a multiple of the page size and heap
  // region granularity. Deletes as many "rightmost" completely free heap
  // regions from the sequence as comprise shrink_bytes bytes. Returns the
  // MemRegion indicating the region those regions comprised, and sets
  // "num_regions_deleted" to the number of regions deleted.
  MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);

  // If "addr" falls within a region in the sequence, return that region,
  // or else NULL.
  inline HeapRegion* addr_to_region(const void* addr);

  void print();

  // Prints out runs of empty regions.
  void print_empty_runs();
};

// ===== Added (new version) =====

class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;

#define G1_NULL_HRS_INDEX ((size_t) -1)

// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
// the one after it, etc.). Two regions that are consecutive in the
// array should also be adjacent in the address space (i.e.,
// region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
// region we retain the HeapRegion to be able to re-use it in the
// future (in case we recommit it).
//
// We keep track of three lengths:
//
// * _length (returned by length()) is the number of currently
//   committed regions.
// * _allocated_length (not exposed outside this class) is the
//   number of regions for which we have HeapRegions.
// * _max_length (returned by max_length()) is the maximum number of
//   regions the heap can have.
//
// and maintain that: _length <= _allocated_length <= _max_length

class HeapRegionSeq: public CHeapObj {
  // The array that holds the HeapRegions.
  HeapRegion** _regions;

  // Version of _regions biased to address 0
  HeapRegion** _regions_biased;

  // The number of regions committed in the heap.
  size_t _length;

  // The address of the first reserved word in the heap.
  HeapWord* _heap_bottom;

  // The address of the last reserved word in the heap - 1.
  HeapWord* _heap_end;

  // The log of the region byte size.
  size_t _region_shift;

  // A hint for which index to start searching from for humongous
  // allocations.
  size_t _next_search_index;

  // The number of regions for which we have allocated HeapRegions.
  size_t _allocated_length;

  // The maximum number of regions in the heap.
  size_t _max_length;

  // Find a contiguous set of empty regions of length num, starting
  // from the given index.
  size_t find_contiguous_from(size_t from, size_t num);

  // Map a heap address to a biased region index. Assume that the
  // address is valid.
  inline size_t addr_to_index_biased(HeapWord* addr) const;

  void increment_length(size_t* length) {
    assert(*length < _max_length, "pre-condition");
    *length += 1;
  }

  void decrement_length(size_t* length) {
    assert(*length > 0, "pre-condition");
    *length -= 1;
  }

public:
  // Empty constructor, we'll initialize it with the initialize() method.
  HeapRegionSeq() { }

  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);

  // Return the HeapRegion at the given index. Assume that the index
  // is valid.
  inline HeapRegion* at(size_t index) const;

  // If addr is within the committed space return its corresponding
  // HeapRegion, otherwise return NULL.
  inline HeapRegion* addr_to_region(HeapWord* addr) const;

  // Return the HeapRegion that corresponds to the given
  // address. Assume the address is valid.
  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;

  // Return the number of regions that have been committed in the heap.
  size_t length() const { return _length; }

  // Return the maximum number of regions in the heap.
  size_t max_length() const { return _max_length; }

  // Expand the sequence to reflect that the heap has grown from
  // old_end to new_end. Either create new HeapRegions, or re-use
  // existing ones, and return them in the given list. Returns the
  // memory region that covers the newly-created regions. If a
  // HeapRegion allocation fails, the result memory region might be
  // smaller than the desired one.
  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
                      FreeRegionList* list);

  // Return the number of contiguous regions at the end of the sequence
  // that are available for allocation.
  size_t free_suffix();

  // Find a contiguous set of empty regions of length num and return
  // the index of the first region or G1_NULL_HRS_INDEX if the
  // search was unsuccessful.
  size_t find_contiguous(size_t num);

  // Apply blk->doHeapRegion() on all committed regions in address order,
  // terminating the iteration early if doHeapRegion() returns true.
  void iterate(HeapRegionClosure* blk) const;

  // As above, but start the iteration from hr and loop around. If hr
  // is NULL, we start from the first region in the heap.
  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;

  // Tag as uncommitted as many regions that are completely free as
  // possible, up to shrink_bytes, from the suffix of the committed
  // sequence. Return a MemRegion that corresponds to the address
  // range of the uncommitted regions. Assume shrink_bytes is page and
  // heap region aligned.
  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);

  // Do some sanity checking.
  void verify_optional() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
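A small aside on G1_NULL_HRS_INDEX: (size_t) -1 wraps around to the largest size_t value, so it can never collide with a real region index. A minimal demonstration of the sentinel pattern (illustrative only, not the HotSpot sources):

#include <cassert>
#include <cstddef>
#include <cstdint>

#define NULL_INDEX ((size_t) -1)

// Find the first zero in an array, or NULL_INDEX if there is none.
static size_t find_first_zero(const int* a, size_t len) {
  for (size_t i = 0; i < len; i += 1) {
    if (a[i] == 0) return i;
  }
  return NULL_INDEX;
}

int main() {
  // (size_t) -1 is SIZE_MAX: no in-bounds array index can ever equal it,
  // since an array that large could not fit in the address space.
  assert(NULL_INDEX == SIZE_MAX);

  int data[] = { 3, 1, 0, 4 };
  assert(find_first_zero(data, 4) == 2);
  assert(find_first_zero(data, 2) == NULL_INDEX);
  return 0;
}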
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,23 +25,42 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP

#include "gc_implementation/g1/heapRegionSeq.hpp"

// ===== Removed (old version) =====

#include "gc_implementation/g1/heapRegion.hpp"

inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
  if ((char*) addr >= _seq_bottom) {
    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
                                         (HeapWord*) _seq_bottom);
    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
    assert(index >= 0, "invariant / paranoia");
    if (index < _regions.length()) {
      HeapRegion* hr = _regions.at(index);
      assert(hr->is_in_reserved(addr),
             "addr_to_region is wrong...");
      return hr;
    }
  }
  return NULL;
}

// ===== Added (new version) =====

inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
  assert(_heap_bottom <= addr && addr < _heap_end,
         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
                 addr, _heap_bottom, _heap_end));
  size_t index = (size_t) addr >> _region_shift;
  return index;
}

inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
  assert(_heap_bottom <= addr && addr < _heap_end,
         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
                 addr, _heap_bottom, _heap_end));
  size_t index_biased = addr_to_index_biased(addr);
  HeapRegion* hr = _regions_biased[index_biased];
  assert(hr != NULL, "invariant");
  return hr;
}

inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
  if (addr != NULL && addr < _heap_end) {
    assert(addr >= _heap_bottom,
           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
    return addr_to_region_unsafe(addr);
  }
  return NULL;
}

inline HeapRegion* HeapRegionSeq::at(size_t index) const {
  assert(index < length(), "pre-condition");
  HeapRegion* hr = _regions[index];
  assert(hr != NULL, "sanity");
  assert(hr->hrs_index() == index, "sanity");
  return hr;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
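The _regions_biased trick is worth a worked example: because the base pointer is pre-biased by (bottom >> shift), addr_to_region_unsafe() needs only a shift and an index, with no subtraction of _heap_bottom on the hot path. A self-contained sketch of the arithmetic with made-up values (the real code shifts by HeapRegion::LogOfHRGrainBytes):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t region_shift = 20;                     // 1 MB "regions"
  const uintptr_t heap_bottom = (uintptr_t) 64 << 20; // heap starts at 64 MB
  const size_t max_length = 16;

  int regions[16];                                    // stand-in metadata array
  for (size_t i = 0; i < max_length; i += 1) regions[i] = (int) i;

  // Bias the base pointer so that (addr >> shift) indexes it directly.
  // NB: the intermediate out-of-range pointer is technically undefined
  // behavior in ISO C++, but it is exactly the biasing HotSpot relies
  // on in practice.
  int* regions_biased = regions - (heap_bottom >> region_shift);

  // An address 5 MB into the heap falls in region 5.
  uintptr_t addr = heap_bottom + ((uintptr_t) 5 << 20) + 12345;
  size_t index_biased = (size_t) addr >> region_shift;
  assert(&regions_biased[index_biased] == &regions[5]);
  assert(regions_biased[index_biased] == 5);
  return 0;
}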
@@ -481,8 +481,9 @@ size_t SparsePRT::mem_size() const {

bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
  // Removed:
  gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
                         card_index, region_id, _hr->hrs_index());
  // Added:
  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
                         SIZE_FORMAT" sparse.",
                         card_index, region_id, _hr->hrs_index());
#endif
  if (_next->occupied_entries() * 2 > _next->capacity()) {
    expand();

@@ -533,8 +534,8 @@ void SparsePRT::expand() {

  _next = new RSHashTable(last->capacity() * 2);
#if SPARSE_PRT_VERBOSE
  // Removed:
  gclog_or_tty->print_cr("  Expanded sparse table for %d to %d.",
                         _hr->hrs_index(), _next->capacity());
  // Added:
  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
                         _hr->hrs_index(), _next->capacity());
#endif
  for (size_t i = 0; i < last->capacity(); i++) {
    SparsePRTEntry* e = last->entry((int)i);
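These sparsePRT.cpp tweaks follow from hrs_index() now returning size_t: %d would be wrong for size_t on LP64 platforms, so the format strings switch to HotSpot's SIZE_FORMAT macro, spliced in via string-literal concatenation. A sketch of the idea; the SIZE_FORMAT definition below is an illustrative assumption, not the one from globalDefinitions.hpp:

#include <cstdio>
#include <cstddef>

// HotSpot defines SIZE_FORMAT per platform; this stand-in covers the
// common LP64/ILP32 cases only and is for illustration.
#ifdef _LP64
#define SIZE_FORMAT "%lu"
#else
#define SIZE_FORMAT "%u"
#endif

int main() {
  size_t region_index = 1234;
  // String-literal concatenation splices the macro into the format,
  // mirroring: print_cr("... to region "SIZE_FORMAT" sparse.", ...)
  printf("Expanded sparse table for " SIZE_FORMAT " entries.\n",
         region_index);
  // The standard C99/C++11 way to achieve the same portability is %zu:
  printf("Expanded sparse table for %zu entries.\n", region_index);
  return 0;
}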