提交 e7d93ed3 编写于 作者: J jcoomes

Merge

...@@ -232,7 +232,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) { ...@@ -232,7 +232,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
length >= 1, "Illegal constant pool size %u in class file %s", length >= 1, "Illegal constant pool size %u in class file %s",
length, CHECK_(nullHandle)); length, CHECK_(nullHandle));
constantPoolOop constant_pool = constantPoolOop constant_pool =
oopFactory::new_constantPool(length, CHECK_(nullHandle)); oopFactory::new_constantPool(length,
methodOopDesc::IsSafeConc,
CHECK_(nullHandle));
constantPoolHandle cp (THREAD, constant_pool); constantPoolHandle cp (THREAD, constant_pool);
cp->set_partially_loaded(); // Enables heap verify to work on partial constantPoolOops cp->set_partially_loaded(); // Enables heap verify to work on partial constantPoolOops
...@@ -1675,7 +1677,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf ...@@ -1675,7 +1677,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
// All sizing information for a methodOop is finally available, now create it // All sizing information for a methodOop is finally available, now create it
methodOop m_oop = oopFactory::new_method( methodOop m_oop = oopFactory::new_method(
code_length, access_flags, linenumber_table_length, code_length, access_flags, linenumber_table_length,
total_lvt_length, checked_exceptions_length, CHECK_(nullHandle)); total_lvt_length, checked_exceptions_length,
methodOopDesc::IsSafeConc, CHECK_(nullHandle));
methodHandle m (THREAD, m_oop); methodHandle m (THREAD, m_oop);
ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize); ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
......
...@@ -706,6 +706,30 @@ void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { ...@@ -706,6 +706,30 @@ void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
} }
} }
// Apply the given closure to each live object in the space.
// The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
// objects in the space with references to objects that are no longer
// valid. For example, an object may reference another object
// that has already been swept up (collected). This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object. See obj_is_alive() for details on how liveness of an
// object is decided.
void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
assert_lock_strong(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
// Walk the space block by block from bottom() to end(); the block
// size is recomputed inside the loop body before the cursor advances.
HeapWord *cur, *limit;
size_t curSize;
for (cur = bottom(), limit = end(); cur < limit;
cur += curSize) {
curSize = block_size(cur);
// Hand only live objects (not free blocks or dead objects) to the closure.
if (block_is_obj(cur) && obj_is_alive(cur)) {
blk->do_object(oop(cur));
}
}
}
void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) { UpwardsObjectClosure* cl) {
assert_locked(); assert_locked();
...@@ -861,7 +885,9 @@ const { ...@@ -861,7 +885,9 @@ const {
} else { } else {
// must read from what 'p' points to in each loop. // must read from what 'p' points to in each loop.
klassOop k = ((volatile oopDesc*)p)->klass_or_null(); klassOop k = ((volatile oopDesc*)p)->klass_or_null();
if (k != NULL && ((oopDesc*)p)->is_parsable()) { if (k != NULL &&
((oopDesc*)p)->is_parsable() &&
((oopDesc*)p)->is_conc_safe()) {
assert(k->is_oop(), "Should really be klass oop."); assert(k->is_oop(), "Should really be klass oop.");
oop o = (oop)p; oop o = (oop)p;
assert(o->is_oop(), "Should be an oop"); assert(o->is_oop(), "Should be an oop");
......
...@@ -481,6 +481,15 @@ class CompactibleFreeListSpace: public CompactibleSpace { ...@@ -481,6 +481,15 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void oop_iterate(OopClosure* cl); void oop_iterate(OopClosure* cl);
void object_iterate(ObjectClosure* blk); void object_iterate(ObjectClosure* blk);
// Apply the closure to each object in the space whose references
// point to objects in the heap. The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
// objects in the space with references to objects that are no longer
// valid. For example, an object may reference another object
// that has already been swept up (collected). This method uses
// obj_is_alive() to determine whether it is safe to iterate over
// an object.
void safe_object_iterate(ObjectClosure* blk);
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Requires that "mr" be entirely within the space. // Requires that "mr" be entirely within the space.
......
...@@ -3017,6 +3017,16 @@ ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) { ...@@ -3017,6 +3017,16 @@ ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
} }
} }
// Apply "cl" to each object in this generation by delegating to
// Generation::safe_object_iterate(), ensuring the free list lock is
// held by this thread for the duration of the iteration.
void
ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
if (freelistLock()->owned_by_self()) {
// Lock already held by this thread; just delegate.
Generation::safe_object_iterate(cl);
} else {
// Acquire the free list lock (without a safepoint check) and delegate.
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
Generation::safe_object_iterate(cl);
}
}
void void
ConcurrentMarkSweepGeneration::pre_adjust_pointers() { ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
} }
...@@ -6623,7 +6633,11 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( ...@@ -6623,7 +6633,11 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
if (_bitMap->isMarked(addr)) { if (_bitMap->isMarked(addr)) {
// it's marked; is it potentially uninitialized? // it's marked; is it potentially uninitialized?
if (p->klass_or_null() != NULL) { if (p->klass_or_null() != NULL) {
if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) { // If is_conc_safe is false, the object may be undergoing
// change by the VM outside a safepoint. Don't try to
// scan it, but rather leave it for the remark phase.
if (CMSPermGenPrecleaningEnabled &&
(!p->is_conc_safe() || !p->is_parsable())) {
// Signal precleaning to redirty the card since // Signal precleaning to redirty the card since
// the klass pointer is already installed. // the klass pointer is already installed.
assert(size == 0, "Initial value"); assert(size == 0, "Initial value");
...@@ -7001,7 +7015,6 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) { ...@@ -7001,7 +7015,6 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
_mut->clear_range(mr); _mut->clear_range(mr);
} }
DEBUG_ONLY(}) DEBUG_ONLY(})
// Note: the finger doesn't advance while we drain // Note: the finger doesn't advance while we drain
// the stack below. // the stack below.
PushOrMarkClosure pushOrMarkClosure(_collector, PushOrMarkClosure pushOrMarkClosure(_collector,
...@@ -8062,9 +8075,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) { ...@@ -8062,9 +8075,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
#ifdef DEBUG #ifdef DEBUG
if (oop(addr)->klass_or_null() != NULL && if (oop(addr)->klass_or_null() != NULL &&
( !_collector->should_unload_classes() ( !_collector->should_unload_classes()
|| oop(addr)->is_parsable())) { || (oop(addr)->is_parsable()) &&
oop(addr)->is_conc_safe())) {
// Ignore mark word because we are running concurrent with mutators // Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop"); assert(oop(addr)->is_oop(true), "live block should be an oop");
// is_conc_safe is checked before performing this assertion
// because an object that is not is_conc_safe may not yet
// have a correct return from size().
assert(size == assert(size ==
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
"P-mark and computed size do not agree"); "P-mark and computed size do not agree");
...@@ -8077,6 +8094,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) { ...@@ -8077,6 +8094,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
(!_collector->should_unload_classes() (!_collector->should_unload_classes()
|| oop(addr)->is_parsable()), || oop(addr)->is_parsable()),
"Should be an initialized object"); "Should be an initialized object");
// Note that there are objects used during class redefinition
// (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
// which are discarded with their is_conc_safe state still
// false. These objects may be floating garbage so may be
// seen here. If they are floating garbage their size
// should be attainable from their klass. Do not assume that
// is_conc_safe() is true for oop(addr).
// Ignore mark word because we are running concurrent with mutators // Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop"); assert(oop(addr)->is_oop(true), "live block should be an oop");
// Verify that the bit map has no bits marked between // Verify that the bit map has no bits marked between
......
...@@ -1212,6 +1212,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration { ...@@ -1212,6 +1212,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// More iteration support // More iteration support
virtual void oop_iterate(MemRegion mr, OopClosure* cl); virtual void oop_iterate(MemRegion mr, OopClosure* cl);
virtual void oop_iterate(OopClosure* cl); virtual void oop_iterate(OopClosure* cl);
virtual void safe_object_iterate(ObjectClosure* cl);
virtual void object_iterate(ObjectClosure* cl); virtual void object_iterate(ObjectClosure* cl);
// Need to declare the full complement of closures, whether we'll // Need to declare the full complement of closures, whether we'll
......
...@@ -850,6 +850,7 @@ public: ...@@ -850,6 +850,7 @@ public:
// Iterate over all objects, calling "cl.do_object" on each. // Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl); virtual void object_iterate(ObjectClosure* cl);
virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
// Iterate over all objects allocated since the last collection, calling // Iterate over all objects allocated since the last collection, calling
// "cl.do_object" on each. The heap must have been initialized properly // "cl.do_object" on each. The heap must have been initialized properly
......
...@@ -200,6 +200,7 @@ class ParallelScavengeHeap : public CollectedHeap { ...@@ -200,6 +200,7 @@ class ParallelScavengeHeap : public CollectedHeap {
void oop_iterate(OopClosure* cl); void oop_iterate(OopClosure* cl);
void object_iterate(ObjectClosure* cl); void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
void permanent_oop_iterate(OopClosure* cl); void permanent_oop_iterate(OopClosure* cl);
void permanent_object_iterate(ObjectClosure* cl); void permanent_object_iterate(ObjectClosure* cl);
......
...@@ -466,6 +466,10 @@ class CollectedHeap : public CHeapObj { ...@@ -466,6 +466,10 @@ class CollectedHeap : public CHeapObj {
// This includes objects in permanent memory. // This includes objects in permanent memory.
virtual void object_iterate(ObjectClosure* cl) = 0; virtual void object_iterate(ObjectClosure* cl) = 0;
// Similar to object_iterate() except iterates only
// over live objects.
virtual void safe_object_iterate(ObjectClosure* cl) = 0;
// Behaves the same as oop_iterate, except only traverses // Behaves the same as oop_iterate, except only traverses
// interior pointers contained in permanent memory. If there // interior pointers contained in permanent memory. If there
// is no permanent memory, does nothing. // is no permanent memory, does nothing.
......
...@@ -910,6 +910,13 @@ void GenCollectedHeap::object_iterate(ObjectClosure* cl) { ...@@ -910,6 +910,13 @@ void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
perm_gen()->object_iterate(cl); perm_gen()->object_iterate(cl);
} }
// Safe variant of object_iterate(): apply "cl" to the objects in each
// of the _n_gens generations and then in the permanent generation.
void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->safe_object_iterate(cl);
}
perm_gen()->safe_object_iterate(cl);
}
void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
for (int i = 0; i < _n_gens; i++) { for (int i = 0; i < _n_gens; i++) {
_gens[i]->object_iterate_since_last_GC(cl); _gens[i]->object_iterate_since_last_GC(cl);
......
...@@ -215,6 +215,7 @@ public: ...@@ -215,6 +215,7 @@ public:
void oop_iterate(OopClosure* cl); void oop_iterate(OopClosure* cl);
void oop_iterate(MemRegion mr, OopClosure* cl); void oop_iterate(MemRegion mr, OopClosure* cl);
void object_iterate(ObjectClosure* cl); void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
void object_iterate_since_last_GC(ObjectClosure* cl); void object_iterate_since_last_GC(ObjectClosure* cl);
Space* space_containing(const void* addr) const; Space* space_containing(const void* addr) const;
......
...@@ -319,6 +319,21 @@ void Generation::object_iterate(ObjectClosure* cl) { ...@@ -319,6 +319,21 @@ void Generation::object_iterate(ObjectClosure* cl) {
space_iterate(&blk); space_iterate(&blk);
} }
// Helper closure: forwards a safe object iteration to each space
// handed to it by Generation::space_iterate().
class GenerationSafeObjIterateClosure : public SpaceClosure {
private:
ObjectClosure* _cl;  // the per-object closure applied in each space
public:
virtual void do_space(Space* s) {
s->safe_object_iterate(_cl);
}
GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};
// Apply "cl" to every object in every space of this generation, using
// each space's safe_object_iterate().
void Generation::safe_object_iterate(ObjectClosure* cl) {
GenerationSafeObjIterateClosure blk(cl);
space_iterate(&blk);
}
void Generation::prepare_for_compaction(CompactPoint* cp) { void Generation::prepare_for_compaction(CompactPoint* cp) {
// Generic implementation, can be specialized // Generic implementation, can be specialized
CompactibleSpace* space = first_compaction_space(); CompactibleSpace* space = first_compaction_space();
......
...@@ -518,6 +518,11 @@ class Generation: public CHeapObj { ...@@ -518,6 +518,11 @@ class Generation: public CHeapObj {
// each. // each.
virtual void object_iterate(ObjectClosure* cl); virtual void object_iterate(ObjectClosure* cl);
// Iterate over all safe objects in the generation, calling "cl.do_object" on
// each. An object is safe if its references point to other objects in
// the heap. This defaults to object_iterate() unless overridden.
virtual void safe_object_iterate(ObjectClosure* cl);
// Iterate over all objects allocated in the generation since the last // Iterate over all objects allocated in the generation since the last
// collection, calling "cl.do_object" on each. The generation must have // collection, calling "cl.do_object" on each. The generation must have
// been initialized properly to support this function, or else this call // been initialized properly to support this function, or else this call
......
...@@ -263,6 +263,9 @@ void HeapInspection::heap_inspection(outputStream* st) { ...@@ -263,6 +263,9 @@ void HeapInspection::heap_inspection(outputStream* st) {
if (!cit.allocation_failed()) { if (!cit.allocation_failed()) {
// Iterate over objects in the heap // Iterate over objects in the heap
RecordInstanceClosure ric(&cit); RecordInstanceClosure ric(&cit);
// If this operation encounters a bad object when using CMS,
// consider using safe_object_iterate() which avoids perm gen
// objects that may contain bad references.
Universe::heap()->object_iterate(&ric); Universe::heap()->object_iterate(&ric);
// Report if certain classes are not counted because of // Report if certain classes are not counted because of
...@@ -317,5 +320,8 @@ void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* ...@@ -317,5 +320,8 @@ void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>*
// Iterate over objects in the heap // Iterate over objects in the heap
FindInstanceClosure fic(k, result); FindInstanceClosure fic(k, result);
// If this operation encounters a bad object when using CMS,
// consider using safe_object_iterate() which avoids perm gen
// objects that may contain bad references.
Universe::heap()->object_iterate(&fic); Universe::heap()->object_iterate(&fic);
} }
...@@ -82,9 +82,11 @@ objArrayOop oopFactory::new_system_objArray(int length, TRAPS) { ...@@ -82,9 +82,11 @@ objArrayOop oopFactory::new_system_objArray(int length, TRAPS) {
} }
constantPoolOop oopFactory::new_constantPool(int length, TRAPS) { constantPoolOop oopFactory::new_constantPool(int length,
bool is_conc_safe,
TRAPS) {
constantPoolKlass* ck = constantPoolKlass::cast(Universe::constantPoolKlassObj()); constantPoolKlass* ck = constantPoolKlass::cast(Universe::constantPoolKlassObj());
return ck->allocate(length, CHECK_NULL); return ck->allocate(length, is_conc_safe, CHECK_NULL);
} }
...@@ -105,11 +107,13 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size, ...@@ -105,11 +107,13 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size,
int compressed_line_number_size, int compressed_line_number_size,
int localvariable_table_length, int localvariable_table_length,
int checked_exceptions_length, int checked_exceptions_length,
bool is_conc_safe,
TRAPS) { TRAPS) {
klassOop cmkObj = Universe::constMethodKlassObj(); klassOop cmkObj = Universe::constMethodKlassObj();
constMethodKlass* cmk = constMethodKlass::cast(cmkObj); constMethodKlass* cmk = constMethodKlass::cast(cmkObj);
return cmk->allocate(byte_code_size, compressed_line_number_size, return cmk->allocate(byte_code_size, compressed_line_number_size,
localvariable_table_length, checked_exceptions_length, localvariable_table_length, checked_exceptions_length,
is_conc_safe,
CHECK_NULL); CHECK_NULL);
} }
...@@ -117,14 +121,17 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size, ...@@ -117,14 +121,17 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size,
methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags, methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
int compressed_line_number_size, int compressed_line_number_size,
int localvariable_table_length, int localvariable_table_length,
int checked_exceptions_length, TRAPS) { int checked_exceptions_length,
bool is_conc_safe,
TRAPS) {
methodKlass* mk = methodKlass::cast(Universe::methodKlassObj()); methodKlass* mk = methodKlass::cast(Universe::methodKlassObj());
assert(!access_flags.is_native() || byte_code_size == 0, assert(!access_flags.is_native() || byte_code_size == 0,
"native methods should not contain byte codes"); "native methods should not contain byte codes");
constMethodOop cm = new_constMethod(byte_code_size, constMethodOop cm = new_constMethod(byte_code_size,
compressed_line_number_size, compressed_line_number_size,
localvariable_table_length, localvariable_table_length,
checked_exceptions_length, CHECK_NULL); checked_exceptions_length,
is_conc_safe, CHECK_NULL);
constMethodHandle rw(THREAD, cm); constMethodHandle rw(THREAD, cm);
return mk->allocate(rw, access_flags, CHECK_NULL); return mk->allocate(rw, access_flags, CHECK_NULL);
} }
......
...@@ -81,7 +81,9 @@ class oopFactory: AllStatic { ...@@ -81,7 +81,9 @@ class oopFactory: AllStatic {
static symbolHandle new_symbol_handle(const char* name, TRAPS) { return new_symbol_handle(name, (int)strlen(name), CHECK_(symbolHandle())); } static symbolHandle new_symbol_handle(const char* name, TRAPS) { return new_symbol_handle(name, (int)strlen(name), CHECK_(symbolHandle())); }
// Constant pools // Constant pools
static constantPoolOop new_constantPool (int length, TRAPS); static constantPoolOop new_constantPool (int length,
bool is_conc_safe,
TRAPS);
static constantPoolCacheOop new_constantPoolCache(int length, TRAPS); static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);
// Instance classes // Instance classes
...@@ -93,9 +95,20 @@ private: ...@@ -93,9 +95,20 @@ private:
static constMethodOop new_constMethod(int byte_code_size, static constMethodOop new_constMethod(int byte_code_size,
int compressed_line_number_size, int compressed_line_number_size,
int localvariable_table_length, int localvariable_table_length,
int checked_exceptions_length, TRAPS); int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
public: public:
static methodOop new_method(int byte_code_size, AccessFlags access_flags, int compressed_line_number_size, int localvariable_table_length, int checked_exceptions_length, TRAPS); // Set is_conc_safe for methods which cannot safely be
// processed by concurrent GC even after the return of
// the method.
static methodOop new_method(int byte_code_size,
AccessFlags access_flags,
int compressed_line_number_size,
int localvariable_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
// Method Data containers // Method Data containers
static methodDataOop new_methodData(methodHandle method, TRAPS); static methodDataOop new_methodData(methodHandle method, TRAPS);
......
...@@ -569,7 +569,15 @@ void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { ...@@ -569,7 +569,15 @@ void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
if (prev > mr.start()) { if (prev > mr.start()) {
region_start_addr = prev; region_start_addr = prev;
blk_start_addr = prev; blk_start_addr = prev;
assert(blk_start_addr == block_start(region_start_addr), "invariant"); // The previous invocation may have pushed "prev" beyond the
// last allocated block yet there may still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else { } else {
region_start_addr = mr.start(); region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr); blk_start_addr = block_start(region_start_addr);
...@@ -705,6 +713,12 @@ void ContiguousSpace::object_iterate(ObjectClosure* blk) { ...@@ -705,6 +713,12 @@ void ContiguousSpace::object_iterate(ObjectClosure* blk) {
object_iterate_from(bm, blk); object_iterate_from(bm, blk);
} }
// For a contiguous space object_iterate() and safe_object_iterate()
// are the same; simply delegate.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
object_iterate(blk);
}
void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) { void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
assert(mark.space() == this, "Mark does not match space"); assert(mark.space() == this, "Mark does not match space");
HeapWord* p = mark.point(); HeapWord* p = mark.point();
......
...@@ -193,6 +193,9 @@ class Space: public CHeapObj { ...@@ -193,6 +193,9 @@ class Space: public CHeapObj {
// each. Objects allocated by applications of the closure are not // each. Objects allocated by applications of the closure are not
// included in the iteration. // included in the iteration.
virtual void object_iterate(ObjectClosure* blk) = 0; virtual void object_iterate(ObjectClosure* blk) = 0;
// Similar to object_iterate() except only iterates over
// objects whose internal references point to objects in the space.
virtual void safe_object_iterate(ObjectClosure* blk) = 0;
// Iterate over all objects that intersect with mr, calling "cl->do_object" // Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already // on each. There is an exception to this: if this closure has already
...@@ -843,6 +846,9 @@ class ContiguousSpace: public CompactibleSpace { ...@@ -843,6 +846,9 @@ class ContiguousSpace: public CompactibleSpace {
void oop_iterate(OopClosure* cl); void oop_iterate(OopClosure* cl);
void oop_iterate(MemRegion mr, OopClosure* cl); void oop_iterate(MemRegion mr, OopClosure* cl);
void object_iterate(ObjectClosure* blk); void object_iterate(ObjectClosure* blk);
// For contiguous spaces this method will iterate safely over objects
// in the space (i.e., between bottom and top) when at a safepoint.
void safe_object_iterate(ObjectClosure* blk);
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// iterates on objects up to the safe limit // iterates on objects up to the safe limit
HeapWord* object_iterate_careful(ObjectClosureCareful* cl); HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
......
...@@ -49,10 +49,16 @@ bool constMethodKlass::oop_is_parsable(oop obj) const { ...@@ -49,10 +49,16 @@ bool constMethodKlass::oop_is_parsable(oop obj) const {
return constMethodOop(obj)->object_is_parsable(); return constMethodOop(obj)->object_is_parsable();
} }
// Returns whether the given constMethodOop is safe for concurrent GC
// processing, by reading the flag stored on the oop itself.
bool constMethodKlass::oop_is_conc_safe(oop obj) const {
assert(obj->is_constMethod(), "must be constMethod oop");
return constMethodOop(obj)->is_conc_safe();
}
constMethodOop constMethodKlass::allocate(int byte_code_size, constMethodOop constMethodKlass::allocate(int byte_code_size,
int compressed_line_number_size, int compressed_line_number_size,
int localvariable_table_length, int localvariable_table_length,
int checked_exceptions_length, int checked_exceptions_length,
bool is_conc_safe,
TRAPS) { TRAPS) {
int size = constMethodOopDesc::object_size(byte_code_size, int size = constMethodOopDesc::object_size(byte_code_size,
...@@ -75,6 +81,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size, ...@@ -75,6 +81,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
compressed_line_number_size, compressed_line_number_size,
localvariable_table_length); localvariable_table_length);
assert(cm->size() == size, "wrong size for object"); assert(cm->size() == size, "wrong size for object");
cm->set_is_conc_safe(is_conc_safe);
cm->set_partially_loaded(); cm->set_partially_loaded();
assert(cm->is_parsable(), "Is safely parsable by gc"); assert(cm->is_parsable(), "Is safely parsable by gc");
return cm; return cm;
......
...@@ -32,12 +32,16 @@ public: ...@@ -32,12 +32,16 @@ public:
// Testing // Testing
bool oop_is_constMethod() const { return true; } bool oop_is_constMethod() const { return true; }
virtual bool oop_is_parsable(oop obj) const; virtual bool oop_is_parsable(oop obj) const;
virtual bool oop_is_conc_safe(oop obj) const;
// Allocation // Allocation
DEFINE_ALLOCATE_PERMANENT(constMethodKlass); DEFINE_ALLOCATE_PERMANENT(constMethodKlass);
constMethodOop allocate(int byte_code_size, int compressed_line_number_size, constMethodOop allocate(int byte_code_size, int compressed_line_number_size,
int localvariable_table_length, int localvariable_table_length,
int checked_exceptions_length, TRAPS); int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
static klassOop create_klass(TRAPS); static klassOop create_klass(TRAPS);
// Sizing // Sizing
......
...@@ -104,6 +104,7 @@ private: ...@@ -104,6 +104,7 @@ private:
// loads and stores. This value may updated and read without a lock by // loads and stores. This value may updated and read without a lock by
// multiple threads, so is volatile. // multiple threads, so is volatile.
volatile uint64_t _fingerprint; volatile uint64_t _fingerprint;
volatile bool _is_conc_safe; // if true, safe for concurrent GC processing
public: public:
oop* oop_block_beg() const { return adr_method(); } oop* oop_block_beg() const { return adr_method(); }
...@@ -273,6 +274,8 @@ public: ...@@ -273,6 +274,8 @@ public:
oop* adr_method() const { return (oop*)&_method; } oop* adr_method() const { return (oop*)&_method; }
oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; } oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; }
oop* adr_exception_table() const { return (oop*)&_exception_table; } oop* adr_exception_table() const { return (oop*)&_exception_table; }
bool is_conc_safe() { return _is_conc_safe; }
void set_is_conc_safe(bool v) { _is_conc_safe = v; }
// Unique id for the method // Unique id for the method
static const u2 MAX_IDNUM; static const u2 MAX_IDNUM;
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
# include "incls/_precompiled.incl" # include "incls/_precompiled.incl"
# include "incls/_constantPoolKlass.cpp.incl" # include "incls/_constantPoolKlass.cpp.incl"
constantPoolOop constantPoolKlass::allocate(int length, TRAPS) { constantPoolOop constantPoolKlass::allocate(int length, bool is_conc_safe, TRAPS) {
int size = constantPoolOopDesc::object_size(length); int size = constantPoolOopDesc::object_size(length);
KlassHandle klass (THREAD, as_klassOop()); KlassHandle klass (THREAD, as_klassOop());
constantPoolOop c = constantPoolOop c =
...@@ -38,6 +38,9 @@ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) { ...@@ -38,6 +38,9 @@ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
c->set_flags(0); c->set_flags(0);
// only set to non-zero if constant pool is merged by RedefineClasses // only set to non-zero if constant pool is merged by RedefineClasses
c->set_orig_length(0); c->set_orig_length(0);
// if constant pool may change during RedefineClasses, it is created
// unsafe for GC concurrent processing.
c->set_is_conc_safe(is_conc_safe);
// all fields are initialized; needed for GC // all fields are initialized; needed for GC
// initialize tag array // initialize tag array
...@@ -207,6 +210,11 @@ int constantPoolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) ...@@ -207,6 +210,11 @@ int constantPoolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr)
return size; return size;
} }
// Returns whether the given constantPoolOop is safe for concurrent GC
// processing, by reading the flag set on the oop at allocation time.
bool constantPoolKlass::oop_is_conc_safe(oop obj) const {
assert(obj->is_constantPool(), "must be constantPool");
return constantPoolOop(obj)->is_conc_safe();
}
#ifndef SERIALGC #ifndef SERIALGC
int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert (obj->is_constantPool(), "obj must be constant pool"); assert (obj->is_constantPool(), "obj must be constant pool");
......
...@@ -34,7 +34,7 @@ class constantPoolKlass : public Klass { ...@@ -34,7 +34,7 @@ class constantPoolKlass : public Klass {
// Allocation // Allocation
DEFINE_ALLOCATE_PERMANENT(constantPoolKlass); DEFINE_ALLOCATE_PERMANENT(constantPoolKlass);
constantPoolOop allocate(int length, TRAPS); constantPoolOop allocate(int length, bool is_conc_safe, TRAPS);
static klassOop create_klass(TRAPS); static klassOop create_klass(TRAPS);
// Casting from klassOop // Casting from klassOop
...@@ -48,6 +48,8 @@ class constantPoolKlass : public Klass { ...@@ -48,6 +48,8 @@ class constantPoolKlass : public Klass {
int object_size() const { return align_object_size(header_size()); } int object_size() const { return align_object_size(header_size()); }
// Garbage collection // Garbage collection
// Returns true is the object is safe for GC concurrent processing.
virtual bool oop_is_conc_safe(oop obj) const;
void oop_follow_contents(oop obj); void oop_follow_contents(oop obj);
int oop_adjust_pointers(oop obj); int oop_adjust_pointers(oop obj);
......
...@@ -43,6 +43,8 @@ class constantPoolOopDesc : public oopDesc { ...@@ -43,6 +43,8 @@ class constantPoolOopDesc : public oopDesc {
klassOop _pool_holder; // the corresponding class klassOop _pool_holder; // the corresponding class
int _flags; // a few header bits to describe contents for GC int _flags; // a few header bits to describe contents for GC
int _length; // number of elements in the array int _length; // number of elements in the array
volatile bool _is_conc_safe; // if true, safe for concurrent
// GC processing
// only set to non-zero if constant pool is merged by RedefineClasses // only set to non-zero if constant pool is merged by RedefineClasses
int _orig_length; int _orig_length;
...@@ -379,6 +381,9 @@ class constantPoolOopDesc : public oopDesc { ...@@ -379,6 +381,9 @@ class constantPoolOopDesc : public oopDesc {
static int object_size(int length) { return align_object_size(header_size() + length); } static int object_size(int length) { return align_object_size(header_size() + length); }
int object_size() { return object_size(length()); } int object_size() { return object_size(length()); }
bool is_conc_safe() { return _is_conc_safe; }
void set_is_conc_safe(bool v) { _is_conc_safe = v; }
friend class constantPoolKlass; friend class constantPoolKlass;
friend class ClassFileParser; friend class ClassFileParser;
friend class SystemDictionary; friend class SystemDictionary;
......
...@@ -606,8 +606,19 @@ class Klass : public Klass_vtbl { ...@@ -606,8 +606,19 @@ class Klass : public Klass_vtbl {
#undef assert_same_query #undef assert_same_query
// Unless overridden, oop is parsable if it has a klass pointer. // Unless overridden, oop is parsable if it has a klass pointer.
// Parsability of an object is object specific.
virtual bool oop_is_parsable(oop obj) const { return true; } virtual bool oop_is_parsable(oop obj) const { return true; }
// Unless overridden, oop is safe for concurrent GC processing
// after its allocation is complete. The exception to
// this is the case where objects are changed after allocation.
// Class redefinition is one of the known exceptions. During
// class redefinition, an allocated class can changed in order
// order to create a merged class (the combiniation of the
// old class definition that has to be perserved and the new class
// definition which is being created.
virtual bool oop_is_conc_safe(oop obj) const { return true; }
// Access flags // Access flags
AccessFlags access_flags() const { return _access_flags; } AccessFlags access_flags() const { return _access_flags; }
void set_access_flags(AccessFlags flags) { _access_flags = flags; } void set_access_flags(AccessFlags flags) { _access_flags = flags; }
......
...@@ -792,15 +792,34 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod ...@@ -792,15 +792,34 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
AccessFlags flags = m->access_flags(); AccessFlags flags = m->access_flags();
int checked_exceptions_len = m->checked_exceptions_length(); int checked_exceptions_len = m->checked_exceptions_length();
int localvariable_len = m->localvariable_table_length(); int localvariable_len = m->localvariable_table_length();
methodOop newm_oop = oopFactory::new_method(new_code_length, flags, new_compressed_linenumber_size, localvariable_len, checked_exceptions_len, CHECK_(methodHandle())); // Allocate newm_oop with the is_conc_safe parameter set
// to IsUnsafeConc to indicate that newm_oop is not yet
// safe for concurrent processing by a GC.
methodOop newm_oop = oopFactory::new_method(new_code_length,
flags,
new_compressed_linenumber_size,
localvariable_len,
checked_exceptions_len,
IsUnsafeConc,
CHECK_(methodHandle()));
methodHandle newm (THREAD, newm_oop); methodHandle newm (THREAD, newm_oop);
int new_method_size = newm->method_size(); int new_method_size = newm->method_size();
// Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop // Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop
constMethodOop newcm = newm->constMethod(); constMethodOop newcm = newm->constMethod();
int new_const_method_size = newm->constMethod()->object_size(); int new_const_method_size = newm->constMethod()->object_size();
memcpy(newm(), m(), sizeof(methodOopDesc)); memcpy(newm(), m(), sizeof(methodOopDesc));
// Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop // Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop
// is_conc_safe is set to false because that is the value of
// is_conc_safe initialzied into newcm and the copy should
// not overwrite that value. During the window during which it is
// tagged as unsafe, some extra work could be needed during precleaning
// or concurrent marking but those phases will be correct. Setting and
// resetting is done in preference to a careful copying into newcm to
// avoid having to know the precise layout of a constMethodOop.
m->constMethod()->set_is_conc_safe(false);
memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc)); memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc));
m->constMethod()->set_is_conc_safe(true);
// Reset correct method/const method, method size, and parameter info // Reset correct method/const method, method size, and parameter info
newcm->set_method(newm()); newcm->set_method(newm());
newm->set_constMethod(newcm); newm->set_constMethod(newcm);
...@@ -831,6 +850,10 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod ...@@ -831,6 +850,10 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
m->localvariable_table_start(), m->localvariable_table_start(),
localvariable_len * sizeof(LocalVariableTableElement)); localvariable_len * sizeof(LocalVariableTableElement));
} }
// Only set is_conc_safe to true when changes to newcm are
// complete.
newcm->set_is_conc_safe(true);
return newm; return newm;
} }
......
...@@ -129,6 +129,10 @@ class methodOopDesc : public oopDesc { ...@@ -129,6 +129,10 @@ class methodOopDesc : public oopDesc {
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
public: public:
static const bool IsUnsafeConc = false;
static const bool IsSafeConc = true;
// accessors for instance variables // accessors for instance variables
constMethodOop constMethod() const { return _constMethod; } constMethodOop constMethod() const { return _constMethod; }
void set_constMethod(constMethodOop xconst) { oop_store_without_check((oop*)&_constMethod, (oop)xconst); } void set_constMethod(constMethodOop xconst) { oop_store_without_check((oop*)&_constMethod, (oop)xconst); }
......
...@@ -108,6 +108,13 @@ class oopDesc { ...@@ -108,6 +108,13 @@ class oopDesc {
// installation of their klass pointer. // installation of their klass pointer.
bool is_parsable(); bool is_parsable();
// Some perm gen objects that have been allocated and initialized
// can be changed by the VM when not at a safe point (class rededfinition
// is an example). Such objects should not be examined by the
// concurrent processing of a garbage collector if is_conc_safe()
// returns false.
bool is_conc_safe();
// type test operations (inlined in oop.inline.h) // type test operations (inlined in oop.inline.h)
bool is_instance() const; bool is_instance() const;
bool is_instanceRef() const; bool is_instanceRef() const;
......
...@@ -435,6 +435,10 @@ inline bool oopDesc::is_parsable() { ...@@ -435,6 +435,10 @@ inline bool oopDesc::is_parsable() {
return blueprint()->oop_is_parsable(this); return blueprint()->oop_is_parsable(this);
} }
inline bool oopDesc::is_conc_safe() {
return blueprint()->oop_is_conc_safe(this);
}
inline void update_barrier_set(void* p, oop v) { inline void update_barrier_set(void* p, oop v) {
assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!"); assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
oopDesc::bs()->write_ref_field(p, v); oopDesc::bs()->write_ref_field(p, v);
......
...@@ -1230,8 +1230,14 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( ...@@ -1230,8 +1230,14 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// Constant pools are not easily reused so we allocate a new one // Constant pools are not easily reused so we allocate a new one
// each time. // each time.
// merge_cp is created unsafe for concurrent GC processing. It
// should be marked safe before discarding it because, even if
// garbage. If it crosses a card boundary, it may be scanned
// in order to find the start of the first complete object on the card.
constantPoolHandle merge_cp(THREAD, constantPoolHandle merge_cp(THREAD,
oopFactory::new_constantPool(merge_cp_length, THREAD)); oopFactory::new_constantPool(merge_cp_length,
methodOopDesc::IsUnsafeConc,
THREAD));
int orig_length = old_cp->orig_length(); int orig_length = old_cp->orig_length();
if (orig_length == 0) { if (orig_length == 0) {
// This old_cp is an actual original constant pool. We save // This old_cp is an actual original constant pool. We save
...@@ -1274,6 +1280,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( ...@@ -1274,6 +1280,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// rewriting so we can't use the old constant pool with the new // rewriting so we can't use the old constant pool with the new
// class. // class.
merge_cp()->set_is_conc_safe(true);
merge_cp = constantPoolHandle(); // toss the merged constant pool merge_cp = constantPoolHandle(); // toss the merged constant pool
} else if (old_cp->length() < scratch_cp->length()) { } else if (old_cp->length() < scratch_cp->length()) {
// The old constant pool has fewer entries than the new constant // The old constant pool has fewer entries than the new constant
...@@ -1283,6 +1290,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( ...@@ -1283,6 +1290,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// rewriting so we can't use the new constant pool with the old // rewriting so we can't use the new constant pool with the old
// class. // class.
merge_cp()->set_is_conc_safe(true);
merge_cp = constantPoolHandle(); // toss the merged constant pool merge_cp = constantPoolHandle(); // toss the merged constant pool
} else { } else {
// The old constant pool has more entries than the new constant // The old constant pool has more entries than the new constant
...@@ -1296,6 +1304,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( ...@@ -1296,6 +1304,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
THREAD); THREAD);
// drop local ref to the merged constant pool // drop local ref to the merged constant pool
merge_cp()->set_is_conc_safe(true);
merge_cp = constantPoolHandle(); merge_cp = constantPoolHandle();
} }
} else { } else {
...@@ -1325,7 +1334,10 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( ...@@ -1325,7 +1334,10 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// GCed. // GCed.
set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
THREAD); THREAD);
merge_cp()->set_is_conc_safe(true);
} }
assert(old_cp()->is_conc_safe(), "Just checking");
assert(scratch_cp()->is_conc_safe(), "Just checking");
return JVMTI_ERROR_NONE; return JVMTI_ERROR_NONE;
} // end merge_cp_and_rewrite() } // end merge_cp_and_rewrite()
...@@ -2314,13 +2326,16 @@ void VM_RedefineClasses::set_new_constant_pool( ...@@ -2314,13 +2326,16 @@ void VM_RedefineClasses::set_new_constant_pool(
// worst case merge situation. We want to associate the minimum // worst case merge situation. We want to associate the minimum
// sized constant pool with the klass to save space. // sized constant pool with the klass to save space.
constantPoolHandle smaller_cp(THREAD, constantPoolHandle smaller_cp(THREAD,
oopFactory::new_constantPool(scratch_cp_length, THREAD)); oopFactory::new_constantPool(scratch_cp_length,
methodOopDesc::IsUnsafeConc,
THREAD));
// preserve orig_length() value in the smaller copy // preserve orig_length() value in the smaller copy
int orig_length = scratch_cp->orig_length(); int orig_length = scratch_cp->orig_length();
assert(orig_length != 0, "sanity check"); assert(orig_length != 0, "sanity check");
smaller_cp->set_orig_length(orig_length); smaller_cp->set_orig_length(orig_length);
scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
scratch_cp = smaller_cp; scratch_cp = smaller_cp;
smaller_cp()->set_is_conc_safe(true);
} }
// attach new constant pool to klass // attach new constant pool to klass
...@@ -2516,6 +2531,7 @@ void VM_RedefineClasses::set_new_constant_pool( ...@@ -2516,6 +2531,7 @@ void VM_RedefineClasses::set_new_constant_pool(
rewrite_cp_refs_in_stack_map_table(method, THREAD); rewrite_cp_refs_in_stack_map_table(method, THREAD);
} // end for each method } // end for each method
assert(scratch_cp()->is_conc_safe(), "Just checking");
} // end set_new_constant_pool() } // end set_new_constant_pool()
......
...@@ -1320,6 +1320,9 @@ class VM_HeapIterateOperation: public VM_Operation { ...@@ -1320,6 +1320,9 @@ class VM_HeapIterateOperation: public VM_Operation {
} }
// do the iteration // do the iteration
// If this operation encounters a bad object when using CMS,
// consider using safe_object_iterate() which avoids perm gen
// objects that may contain bad references.
Universe::heap()->object_iterate(_blk); Universe::heap()->object_iterate(_blk);
// when sharing is enabled we must iterate over the shared spaces // when sharing is enabled we must iterate over the shared spaces
......
...@@ -1700,7 +1700,7 @@ void VM_HeapDumper::doit() { ...@@ -1700,7 +1700,7 @@ void VM_HeapDumper::doit() {
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump. // of the heap dump.
HeapObjectDumper obj_dumper(this, writer()); HeapObjectDumper obj_dumper(this, writer());
Universe::heap()->object_iterate(&obj_dumper); Universe::heap()->safe_object_iterate(&obj_dumper);
// HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
do_threads(); do_threads();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册