Commit 60259367 authored by brutisso

8040722: G1: Clean up usages of heap_region_containing

Reviewed-by: tschatzl, jmasa
Parent 1dc50540
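The theme of this cleanup: heap_region_containing() used to return NULL for addresses outside the heap, so every caller carried its own NULL check. After this change the accessor asserts that the address is non-NULL and inside the reserved heap, and the few callers that can legitimately see a NULL reference test for NULL before calling. A minimal sketch of the caller convention, using the real HotSpot names inside hypothetical wrapper functions (it mirrors the is_in_young() hunk below):

// Before: the accessor tolerated out-of-heap addresses and returned NULL.
bool is_young_before(G1CollectedHeap* g1h, oop obj) {
  HeapRegion* hr = g1h->heap_region_containing(obj);
  return hr != NULL && hr->is_young();   // every caller re-checked NULL
}

// After: the accessor asserts the address is valid, so the only check left
// is the meaningful one: a NULL reference.
bool is_young_after(G1CollectedHeap* g1h, oop obj) {
  if (obj == NULL) {
    return false;
  }
  return g1h->heap_region_containing(obj)->is_young();
}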
@@ -2800,7 +2800,6 @@ public:
         str = " O";
       } else {
         HeapRegion* hr = _g1h->heap_region_containing(obj);
-        guarantee(hr != NULL, "invariant");
         bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
         bool marked = _g1h->is_marked(obj, _vo);
@@ -3565,9 +3564,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
 }
 void CMTask::setup_for_region(HeapRegion* hr) {
-  // Separated the asserts so that we know which one fires.
   assert(hr != NULL,
-         "claim_region() should have filtered out continues humongous regions");
+         "claim_region() should have filtered out NULL regions");
   assert(!hr->continuesHumongous(),
          "claim_region() should have filtered out continues humongous regions");
...
@@ -442,24 +442,18 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 // implementation of is_scavengable() for G1 will indicate that
 // all nmethods must be scanned during a partial collection.
 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
-  HeapRegion* hr = heap_region_containing(p);
-  return hr != NULL && hr->in_collection_set();
+  if (p == NULL) {
+    return false;
+  }
+  return heap_region_containing(p)->in_collection_set();
 }
 #endif
 // Returns true if the reference points to an object that
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-    // null
-    assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
-    return false;
-  } else {
-    return !hr->isHumongous();
-  }
+  return !hr->isHumongous();
 }
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -2984,21 +2978,16 @@ HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) cons
 }
 Space* G1CollectedHeap::space_containing(const void* addr) const {
-  Space* res = heap_region_containing(addr);
-  return res;
+  return heap_region_containing(addr);
 }
 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
   Space* sp = space_containing(addr);
-  if (sp != NULL) {
-    return sp->block_start(addr);
-  }
-  return NULL;
+  return sp->block_start(addr);
 }
 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
   Space* sp = space_containing(addr);
-  assert(sp != NULL, "block_size of address outside of heap");
   return sp->block_size(addr);
 }
@@ -4652,30 +4641,19 @@ G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
 void G1ParCopyHelper::mark_object(oop obj) {
-#ifdef ASSERT
-  HeapRegion* hr = _g1->heap_region_containing(obj);
-  assert(hr != NULL, "sanity");
-  assert(!hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
   // We know that the object is not moving so it's safe to read its size.
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-#ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
   assert(from_obj != to_obj, "should not be self-forwarded");
-  HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
-  assert(from_hr != NULL, "sanity");
-  assert(from_hr->in_collection_set(), "from obj should be in the CSet");
-  HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
-  assert(to_hr != NULL, "sanity");
-  assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
   // The object might be in the process of being copied by another
   // worker so we cannot trust that its to-space image is
@@ -6912,11 +6890,7 @@ void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-    return false;
-  } else {
-    return hr->is_in(p);
-  }
+  return hr->is_in(p);
 }
 // Methods for the mutator alloc region
...
@@ -1490,16 +1490,14 @@ public:
   // space containing a given address, or else returns NULL.
   virtual Space* space_containing(const void* addr) const;
-  // A G1CollectedHeap will contain some number of heap regions. This
-  // finds the region containing a given address, or else returns NULL.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>
-  inline HeapRegion* heap_region_containing(const T addr) const;
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
-  // Like the above, but requires "addr" to be in the heap (to avoid a
-  // null-check), and unlike the above, may return an continuing humongous
-  // region.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  // If addr is within a humongous continues region, it returns its humongous start region.
   template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
+  inline HeapRegion* heap_region_containing(const T addr) const;
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
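The two declarations above swap roles: heap_region_containing_raw() is now the low-level accessor, while heap_region_containing() additionally resolves humongous continuation regions to their start region. A sketch of the resulting contract, not compilable outside HotSpot and with a hypothetical helper name, assuming an address inside the tail of a humongous object:

void humongous_contract_sketch(G1CollectedHeap* g1h, HeapWord* addr_in_tail) {
  HeapRegion* raw = g1h->heap_region_containing_raw(addr_in_tail);
  assert(raw->continuesHumongous(), "raw returns the continuation region itself");
  HeapRegion* hr = g1h->heap_region_containing(addr_in_tail);
  assert(hr == raw->humongous_start_region(), "mapped back to the start region");
  assert(hr->startsHumongous(), "the start region heads the humongous object");
}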
@@ -1642,7 +1640,6 @@ public:
   // the region to which the object belongs. An object is dead
   // iff a) it was not allocated since the last mark and b) it
   // is not marked.
-
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_prev_marking(obj) &&
@@ -1652,7 +1649,6 @@ public:
   // This function returns true when an object has been
   // around since the previous marking and hasn't yet
   // been marked during this marking.
-
   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_next_marking(obj) &&
...
@@ -49,21 +49,22 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
 template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
-  // hr can be null if addr in perm_gen
-  if (hr != NULL && hr->continuesHumongous()) {
-    hr = hr->humongous_start_region();
-  }
-  return hr;
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(addr != NULL, "invariant");
+  assert(_g1_reserved.contains((const void*) addr),
+         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
+                 p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+  return _hrs.addr_to_region((HeapWord*) addr);
 }
 template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
-  assert(_g1_reserved.contains((const void*) addr), "invariant");
-  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
-  return res;
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = heap_region_containing_raw(addr);
+  if (hr->continuesHumongous()) {
+    return hr->humongous_start_region();
+  }
+  return hr;
 }
 inline void G1CollectedHeap::reset_gc_time_stamp() {
@@ -154,8 +155,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   // have to keep calling heap_region_containing_raw() in the
   // asserts below.
   DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
-  assert(containing_hr != NULL && start != NULL && word_size > 0,
-         "pre-condition");
+  assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
   assert(!containing_hr->isHumongous(), "it should not be humongous");
@@ -277,8 +277,10 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
 #endif // #ifndef PRODUCT
 inline bool G1CollectedHeap::is_in_young(const oop obj) {
-  HeapRegion* hr = heap_region_containing(obj);
-  return hr != NULL && hr->is_young();
+  if (obj == NULL) {
+    return false;
+  }
+  return heap_region_containing(obj)->is_young();
 }
 // We don't need barriers for initializing stores to objects
@@ -291,21 +293,17 @@ inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
 }
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_dead(obj, hr);
+  return is_obj_dead(obj, heap_region_containing(obj));
 }
 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_ill(obj, hr);
+  return is_obj_ill(obj, heap_region_containing(obj));
 }
 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
...
@@ -130,9 +130,7 @@ inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
-    }
+    _cm->grayRoot(obj, obj->size(), _worker_id, hr);
   }
 }
@@ -159,57 +157,61 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
 template <class T>
 inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
   oopDesc* o = obj.obj();
 #else
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
 #endif // ASSERT
   assert(_from != NULL, "from region must be non-NULL");
   assert(_from->is_in_reserved(p), "p is not in from");
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-      return;
-    }
+  if (_from == to) {
+    // Normally this closure should only be called with cross-region references.
+    // But since Java threads are manipulating the references concurrently and we
+    // reload the values things may have changed.
+    return;
+  }
+  // The _record_refs_into_cset flag is true during the RSet
+  // updating part of an evacuation pause. It is false at all
+  // other times:
+  //  * rebuilding the remembered sets after a full GC
+  //  * during concurrent refinement.
+  //  * updating the remembered sets of regions in the collection
+  //    set in the event of an evacuation failure (when deferred
+  //    updates are enabled).
+  if (_record_refs_into_cset && to->in_collection_set()) {
+    // We are recording references that point into the collection
+    // set and this particular reference does exactly that...
+    // If the referenced object has already been forwarded
+    // to itself, we are handling an evacuation failure and
+    // we have already visited/tried to copy this object
+    // there is no need to retry.
+    if (!self_forwarded(obj)) {
+      assert(_push_ref_cl != NULL, "should not be null");
+      // Push the reference in the refs queue of the G1ParScanThreadState
+      // instance for this worker thread.
+      _push_ref_cl->do_oop(p);
+    }
+    // Deferred updates to the CSet are either discarded (in the normal case),
+    // or processed (if an evacuation failure occurs) at the end
+    // of the collection.
+    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+  } else {
     // We either don't care about pushing references that point into the
     // collection set (i.e. we're not during an evacuation pause) _or_
     // the reference doesn't point into the collection set. Either way
...
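With the NULL case filtered at the top, the closure body above reduces to three decisions: skip NULL, skip same-region references, then either defer the reference (CSet recording) or update the remembered set. A condensed member-function sketch of the new control flow, using the real field names from the diff but a hypothetical function name and with the remembered-set update elided:

template <class T>
void do_oop_nv_sketch(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj == NULL) {
    return;                              // NULL references carry no RSet work
  }
  HeapRegion* to = _g1->heap_region_containing(obj);  // asserts validity, never NULL
  if (_from == to) {
    return;                              // same-region references are uninteresting
  }
  if (_record_refs_into_cset && to->in_collection_set()) {
    if (!self_forwarded(obj)) {
      _push_ref_cl->do_oop(p);           // defer: queue for the evacuation pause
    }
  } else {
    // otherwise update the 'to' region's remembered set (elided here)
  }
}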
@@ -211,7 +211,6 @@ public:
 #endif
     HeapRegion* card_region = _g1h->heap_region_containing(card_start);
-    assert(card_region != NULL, "Yielding cards not in the heap?");
     _cards++;
     if (!card_region->is_on_dirty_cards_region_list()) {
@@ -406,7 +405,6 @@ public:
     HeapWord* start = _ct_bs->addr_for(card_ptr);
     // And find the region containing it.
     HeapRegion* r = _g1->heap_region_containing(start);
-    assert(r != NULL, "unexpected null");
     // Scan oops in the card looking for references into the collection set
     // Don't use addr_for(card_ptr + 1) which can ask for
@@ -568,11 +566,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false; // Not in the G1 heap (might be in perm, for example.)
-  }
   // Why do we have to check here whether a card is on a young region,
   // given that we dirty young regions and, as a result, the
@@ -625,10 +618,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
     start = _ct_bs->addr_for(card_ptr);
     r = _g1->heap_region_containing(start);
-    if (r == NULL) {
-      // Not in the G1 heap
-      return false;
-    }
     // Checking whether the region we got back from the cache
     // is young here is inappropriate. The region could have been
...
@@ -46,26 +46,28 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
 template <class T>
 inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
   oopDesc* o = obj.obj();
 #else
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
 #endif // ASSERT
   assert(from == NULL || from->is_in_reserved(p), "p is not in from");
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && from != to) {
+  if (from != to) {
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
...
@@ -802,7 +802,6 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
-  if (hr == NULL) return false;
   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
...
@@ -240,7 +240,6 @@ void HeapRegionSeq::verify_optional() {
     // Asserts will fire if i is >= _length
     HeapWord* addr = hr->bottom();
     guarantee(addr_to_region(addr) == hr, "sanity");
-    guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
   } else {
     guarantee(hr->is_empty(), "sanity");
     guarantee(!hr->isHumongous(), "sanity");
...
@@ -110,10 +110,6 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
-  // Return the HeapRegion that corresponds to the given
-  // address. Assume the address is valid.
-  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
-
   // Return the number of regions that have been committed in the heap.
   uint length() const { return _committed_length; }
...
@@ -28,21 +28,17 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
-inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  assert(addr < heap_end(),
+         err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
+  assert(addr >= heap_bottom(),
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
   HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }
-inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < heap_end()) {
-    assert(addr >= heap_bottom(),
-           err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
-    return addr_to_region_unsafe(addr);
-  }
-  return NULL;
-}
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
   HeapRegion* hr = _regions.get_by_index(index);
...