Commit 7f3be981 authored by mgerdin

8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS

Reviewed-by: brutisso, tschatzl, stefank
Parent: bdd04bef
@@ -853,7 +853,58 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) {
assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
- Space::object_iterate_mem(mr, cl);
+ assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
last allocated block, yet there may still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}
// Callers of this iterator beware: The closure application should
......
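The hunk above is the heart of the change: the walk advances block by block and uses the closure's previous() address to avoid revisiting objects already covered by an earlier, lower MemRegion. The following standalone sketch models just that memoization; the names (Block, UpwardsVisitor, iterate_mem) are hypothetical, and it omits the freelist locking, the block-offset-table block_start() lookup, and the object-array case that last_was_obj_array handles.

#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-ins for HeapWord* addresses and heap blocks in this sketch.
typedef size_t Addr;
struct Block { Addr start; size_t size; bool is_obj; };

// Models the closure's "memory": previous is the maximum address already
// covered, so a later overlapping region can skip work done earlier.
struct UpwardsVisitor {
  Addr previous = 0;
  void visit(Addr obj_start) { std::printf("object at %zu\n", obj_start); }
};

// Simplified shape of object_iterate_mem: walk blocks upward through
// [mr_start, mr_end), starting from previous when it lies past mr_start.
void iterate_mem(const std::vector<Block>& blocks, Addr mr_start, Addr mr_end,
                 UpwardsVisitor& v) {
  if (v.previous >= mr_end) return;            // nothing new in this region
  Addr cursor = (v.previous > mr_start) ? v.previous : mr_start;
  for (const Block& b : blocks) {
    if (b.start + b.size <= cursor) continue;  // covered by an earlier call
    if (b.start >= mr_end) break;              // beyond this region
    if (b.is_obj) v.visit(b.start);            // free blocks are skipped
    cursor = b.start + b.size;
  }
  v.previous = cursor;                         // min address for next time
}

int main() {
  // The object at 60 spans the region boundary at 100; it is visited once.
  std::vector<Block> heap = {{0, 60, true}, {60, 80, true}, {140, 20, true}};
  UpwardsVisitor v;
  iterate_mem(heap, 0, 100, v);    // visits 0 and 60, previous becomes 140
  iterate_mem(heap, 100, 200, v);  // skips 60, visits 140
  return 0;
}

Unlike the real method, this sketch always records the new previous; the HotSpot version withholds the update when the last object was an object array (see the do_object_bm protocol below).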
@@ -362,6 +362,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// obj_is_alive() to determine whether it is safe to iterate over
// an object.
void safe_object_iterate(ObjectClosure* blk);
// Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already
// been invoked on an object, it may skip such objects in some cases. This is
// most likely to happen in an "upwards" (ascending address) iteration of
// MemRegions.
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Requires that "mr" be entirely within the space.
......
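The skip behavior documented above matters because a single closure instance is applied to many regions in ascending address order during CMS dirty-card rescanning. A hypothetical driver loop (names and card handling assumed for illustration; not part of this patch) would look like:

void rescan_cards_upwards(CompactibleFreeListSpace* sp,
                          HeapWord* start, HeapWord* end,
                          size_t card_size_in_words,
                          UpwardsObjectClosure* cl) {
  // Walk card-sized regions in ascending order; an object straddling a
  // card boundary is processed via the first region that reaches it, and
  // the closure's previous() lets object_iterate_mem() skip it when the
  // following region is handed in.
  for (HeapWord* p = start; p < end; p += card_size_in_words) {
    HeapWord* limit = MIN2(p + card_size_in_words, end);
    sp->object_iterate_mem(MemRegion(p, limit), cl);
  }
}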
@@ -1499,6 +1499,19 @@ class FalseBitMapClosure: public BitMapClosure {
}
};
// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
......
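The do_object_bm return value is the hook behind the comment above: returning true tells the caller not to advance _previous_address past this object. A hypothetical subclass (not one of the closures in this patch) could use it to keep large object arrays eligible for region-by-region rescanning:

class SketchRescanClosure : public UpwardsObjectClosure {
 public:
  virtual bool do_object_bm(oop obj, MemRegion mr) {
    // ... apply an oop closure to the fields of obj that lie within mr ...
    // Returning true for object arrays means object_iterate_mem() will not
    // call set_previous() past this object, so later regions revisit it.
    return obj->is_objArray();
  }
};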
@@ -177,19 +177,6 @@ public:
ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
......
@@ -562,104 +562,11 @@ HeapWord* Space::object_iterate_careful_m(MemRegion mr,
return bottom();
}
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
// last allocated block, yet there may still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}
bool Space::obj_is_alive(const HeapWord* p) const {
assert (block_is_obj(p), "The address should point to an object");
return true;
}
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
assert(used_region().contains(mr), "Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// See the comment in the more general method above in case you
// happen to use this method.
assert(prev == NULL || is_in_reserved(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *obj_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
obj_start_addr = prev;
assert(obj_start_addr == block_start(region_start_addr), "invariant");
} else {
region_start_addr = mr.start();
obj_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (obj_start_addr < region_end_addr) {
oop obj = oop(obj_start_addr);
const size_t size = obj->size();
last_was_obj_array = cl->do_object_bm(obj, derived_mr);
obj_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
"Should be within (closed) used space");
assert(obj_start_addr > prev, "Invariant");
cl->set_previous(obj_start_addr); // min address for next time
}
}
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
......
@@ -188,13 +188,6 @@ class Space: public CHeapObj<mtGC> {
// objects whose internal references point to objects in the space.
virtual void safe_object_iterate(ObjectClosure* blk) = 0;
// Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already
// been invoked on an object, it may skip such objects in some cases. This is
// most likely to happen in an "upwards" (ascending address) iteration of
// MemRegions.
virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Iterate over as many initialized objects in the space as possible,
// calling "cl.do_object_careful" on each. Return NULL if all objects
// in the space (at the start of the iteration) were iterated over.
@@ -558,7 +551,6 @@ class ContiguousSpace: public CompactibleSpace {
// For contiguous spaces this method will iterate safely over objects
// in the space (i.e., between bottom and top) when at a safepoint.
void safe_object_iterate(ObjectClosure* blk);
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// iterates on objects up to the safe limit
HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* concurrent_iteration_safe_limit() {
......