diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index 36a893b125d0b76fcf92ce4efcb2ed478c04ecdf..eb5d6a86a1eaa44c50e6b6171221ef9a4ba09478 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -853,7 +853,58 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
 // Callers of this iterator beware: The closure application should
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 2a74d99583df9d04257fb30b131de34fa6cc88e2..4d12625eb111749c6d39d33f8b71af396de5acbe 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -362,6 +362,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // obj_is_alive() to determine whether it is safe to iterate of
   // an object.
   void safe_object_iterate(ObjectClosure* blk);
+
+  // Iterate over all objects that intersect with mr, calling "cl->do_object"
+  // on each. There is an exception to this: if this closure has already
+  // been invoked on an object, it may skip such objects in some cases. This is
+  // most likely to happen in an "upwards" (ascending address) iteration of
+  // MemRegions.
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index 82c9cd2548933647d29f43178fe4926ddf315dfd..f5ae039a963453edebbfc212f83b6c198ec17877 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -1499,6 +1499,19 @@ class FalseBitMapClosure: public BitMapClosure {
   }
 };
 
+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+  HeapWord* _previous_address;
+ public:
+  UpwardsObjectClosure() : _previous_address(NULL) { }
+  void set_previous(HeapWord* addr) { _previous_address = addr; }
+  HeapWord* previous() { return _previous_address; }
+  // A return value of "true" can be used by the caller to decide
+  // if this object's end should *NOT* be recorded in
+  // _previous_address above.
+  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
 // This closure is used during the second checkpointing phase
 // to rescan the marked objects on the dirty cards in the mod
 // union table and the card table proper.  It's invoked via
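Editorial aside, not part of the patch: a minimal sketch of what a subclass of the relocated UpwardsObjectClosure could look like. The class name and body are invented for illustration, and it assumes that BoolObjectClosure (from iterator.hpp) contributes only the do_object_b() pure virtual, which is stubbed out here.

// Hypothetical example only -- not part of this changeset.
class CountLiveUpwardsClosure: public UpwardsObjectClosure {
  size_t _count;
 public:
  CountLiveUpwardsClosure() : _count(0) { }
  size_t count() const { return _count; }
  bool do_object_b(oop obj) { return true; }   // assumed BoolObjectClosure hook
  virtual bool do_object_bm(oop obj, MemRegion mr) {
    _count++;
    // Returning false lets object_iterate_mem() record the end of the last
    // scanned block via set_previous(), so a later call over a region that
    // starts below that address will not revisit these objects.
    return false;
  }
};

A caller would drive such a closure over ascending MemRegions (for example, dirty-card ranges) with CompactibleFreeListSpace::object_iterate_mem(), relying on previous()/set_previous() to avoid rescanning, as the new implementation above shows.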
diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
index 33d10d69ef6809f1d6528f0082907dfdb55147d2..1e1b17b14053ddf73bc56035db5ad6f07a9d3482 100644
--- a/src/share/vm/memory/iterator.hpp
+++ b/src/share/vm/memory/iterator.hpp
@@ -177,19 +177,6 @@ public:
   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 };
 
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous() { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
 // A version of ObjectClosure that is expected to be robust
 // in the face of possibly uninitialized objects.
 class ObjectClosureCareful : public ObjectClosure {
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
index f22f419d13e78906a8b055ff504ac5b5067f8d9b..99d9802b1a649f378f105ac0e87449af7f89d07c 100644
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -562,104 +562,11 @@ HeapWord* Space::object_iterate_careful_m(MemRegion mr,
   return bottom();
 }
 
-
-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may be still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
 bool Space::obj_is_alive(const HeapWord* p) const {
   assert (block_is_obj(p), "The address should point to an object");
   return true;
 }
 
-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  assert(used_region().contains(mr), "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // See comment above (in more general method above) in case you
-  // happen to use this method.
-  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *obj_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    obj_start_addr = prev;
-    assert(obj_start_addr == block_start(region_start_addr), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    obj_start_addr = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (obj_start_addr < region_end_addr) {
-    oop obj = oop(obj_start_addr);
-    const size_t size = obj->size();
-    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
-    obj_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(obj_start_addr > prev, "Invariant");
-    cl->set_previous(obj_start_addr); // min address for next time
-  }
-}
-
 #if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                              \
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
index 2ba368bec0bc272e6accdc8e5f3bcfa4140319d9..18ed5d5d86f18a873c144c5f6882a67414094389 100644
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -188,13 +188,6 @@ class Space: public CHeapObj {
   // objects whose internal references point to objects in the space.
   virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 
-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each. There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases. This is
-  // Most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
   // Iterate over as many initialized objects in the space as possible,
   // calling "cl.do_object_careful" on each. Return NULL if all objects
   // in the space (at the start of the iteration) were iterated over.
@@ -558,7 +551,6 @@ class ContiguousSpace: public CompactibleSpace {
   // For contiguous spaces this method will iterate safely over objects
   // in the space (i.e., between bottom and top) when at a safepoint.
   void safe_object_iterate(ObjectClosure* blk);
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   // iterates on objects up to the safe limit
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   HeapWord* concurrent_iteration_safe_limit() {
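For readers unfamiliar with the "upwards iteration with memory" pattern that the relocated code implements, the following is a small self-contained model in plain C++ (not HotSpot code; all names and the object layout are invented). It shows how remembering the highest address already handed to the closure lets a later, overlapping region skip objects that were visited before, which is the behaviour described by the comment moved into compactibleFreeListSpace.hpp. The object-array chunking exception (the do_object_bm() return value) is omitted for brevity.

#include <cstdio>
#include <utility>
#include <vector>

struct Region { std::size_t start, end; };            // [start, end), in "words"

struct UpwardsClosure {
  std::size_t previous = 0;                            // max address from last time
  void do_object(std::size_t addr, std::size_t size) {
    std::printf("  visit object @%zu (size %zu)\n", addr, size);
  }
};

// A toy "space": objects laid out at ascending addresses as (start, size) pairs.
static const std::vector<std::pair<std::size_t, std::size_t>> objs = {
  {0, 4}, {4, 8}, {12, 2}, {14, 6}
};

// Model of object_iterate_mem(): visit objects intersecting mr, but skip
// anything that starts below the address recorded by the previous call.
void object_iterate_mem(const Region& mr, UpwardsClosure* cl) {
  if (cl->previous >= mr.end) return;                  // nothing to do
  std::size_t from = cl->previous > mr.start ? cl->previous : mr.start;
  std::size_t last_end = from;
  for (const auto& o : objs) {
    if (o.first < from) continue;                      // visited by an earlier call
    if (o.first >= mr.end) break;                      // beyond this region
    cl->do_object(o.first, o.second);
    last_end = o.first + o.second;
  }
  cl->previous = last_end;                             // min address for next time
}

int main() {
  UpwardsClosure cl;
  std::printf("region [0,10):\n");
  object_iterate_mem({0, 10}, &cl);                    // visits objects at 0 and 4
  std::printf("region [8,20):\n");
  object_iterate_mem({8, 20}, &cl);                    // object at 4 is skipped
  return 0;
}

In the actual patch, the role of "previous" is played by UpwardsObjectClosure::_previous_address, and CompactibleFreeListSpace::object_iterate_mem() additionally walks free-list blocks via block_size()/block_is_obj() and consults the do_object_bm() return value before updating that address.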