diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index 703217f9255305c4678ab0fd96062833c71f73f1..4f8ec88f8668223189e6ef7b702e1602458af80d 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -232,7 +232,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
     length >= 1, "Illegal constant pool size %u in class file %s",
     length, CHECK_(nullHandle));
   constantPoolOop constant_pool =
-    oopFactory::new_constantPool(length, CHECK_(nullHandle));
+    oopFactory::new_constantPool(length,
+                                 methodOopDesc::IsSafeConc,
+                                 CHECK_(nullHandle));
   constantPoolHandle cp (THREAD, constant_pool);
   cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
@@ -1675,7 +1677,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
   // All sizing information for a methodOop is finally available, now create it
   methodOop m_oop  = oopFactory::new_method(
     code_length, access_flags, linenumber_table_length,
-    total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
+    total_lvt_length, checked_exceptions_length,
+    methodOopDesc::IsSafeConc, CHECK_(nullHandle));
   methodHandle m (THREAD, m_oop);
   ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index 4b1fecd7cb38d6789a55147a942e16e357227a05..6c6272a2f847dd0d71bdc18603dab6d14812486f 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -706,6 +706,30 @@ void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   }
 }
 
+// Apply the given closure to each live object in the space.
+// The usage of CompactibleFreeListSpace
+// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
+// objects in the space to contain references to objects that are no longer
+// valid.  For example, an object may reference another object
+// that has already been swept up (collected).  This method uses
+// obj_is_alive() to determine whether it is safe to apply the closure to
+// an object.  See obj_is_alive() for details on how liveness of an
+// object is decided.
+
+void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
+  assert_lock_strong(freelistLock());
+  NOT_PRODUCT(verify_objects_initialized());
+  HeapWord *cur, *limit;
+  size_t curSize;
+  for (cur = bottom(), limit = end(); cur < limit;
+       cur += curSize) {
+    curSize = block_size(cur);
+    if (block_is_obj(cur) && obj_is_alive(cur)) {
+      blk->do_object(oop(cur));
+    }
+  }
+}
+
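An editorial aside before the next hunk: the loop added above is a block-by-block walk guarded by a liveness test. Below is a minimal, self-contained C++ model of that walk under an assumed toy space encoding; block_size(), block_is_obj(), the side liveness table, and every other name here are hypothetical, not HotSpot APIs.

#include <cstdio>
#include <vector>

// Toy space: a flat array of words; each block starts with its size in
// words, negated when the block is free space rather than an object.
typedef long word_t;

static size_t block_size(const std::vector<word_t>& space, size_t cur) {
  word_t w = space[cur];
  return (size_t)(w < 0 ? -w : w);
}

static bool block_is_obj(const std::vector<word_t>& space, size_t cur) {
  return space[cur] > 0;
}

// Mirror of the loop above: cur advances by block_size(cur), and the
// action runs only for blocks that are objects and still marked live.
static void safe_object_iterate(const std::vector<word_t>& space,
                                const std::vector<bool>& live,
                                void (*do_object)(size_t)) {
  for (size_t cur = 0, limit = space.size(); cur < limit;
       cur += block_size(space, cur)) {
    if (block_is_obj(space, cur) && live[cur]) {
      do_object(cur);
    }
  }
}

int main() {
  // Three blocks at offsets 0, 2 and 5: object, free chunk, object.
  std::vector<word_t> space;
  space.push_back(2);  space.push_back(0);
  space.push_back(-3); space.push_back(0); space.push_back(0);
  space.push_back(1);
  std::vector<bool> live(space.size(), false);
  live[0] = true;                       // only the first object is live
  safe_object_iterate(space, live,
                      [](size_t addr) { std::printf("live obj @%zu\n", addr); });
  return 0;
}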
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked();
@@ -861,7 +885,9 @@ const {
   } else {
     // must read from what 'p' points to in each loop.
     klassOop k = ((volatile oopDesc*)p)->klass_or_null();
-    if (k != NULL && ((oopDesc*)p)->is_parsable()) {
+    if (k != NULL &&
+        ((oopDesc*)p)->is_parsable() &&
+        ((oopDesc*)p)->is_conc_safe()) {
       assert(k->is_oop(), "Should really be klass oop.");
       oop o = (oop)p;
       assert(o->is_oop(), "Should be an oop");
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 5306a8f3085db3446be1163e3f903729bea08932..e0c48a1b79a86bbd03bb9facc695dce6dabed6f3 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -481,6 +481,15 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   void oop_iterate(OopClosure* cl);
 
   void object_iterate(ObjectClosure* blk);
+  // Apply the closure to each object in the space whose references
+  // point to objects in the heap.  The usage of CompactibleFreeListSpace
+  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
+  // objects in the space to contain references to objects that are no longer
+  // valid.  For example, an object may reference another object
+  // that has already been swept up (collected).  This method uses
+  // obj_is_alive() to determine whether it is safe to iterate over
+  // an object.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index e83441076594d11a6ed8746ef4e714d193044c91..fb10d1c153b1af1365713b3101747f33901ee281 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -3017,6 +3017,16 @@ ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
   }
 }
 
+void
+ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::safe_object_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::safe_object_iterate(cl);
+  }
+}
+
 void
 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
 }
@@ -6623,7 +6633,11 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
     if (p->klass_or_null() != NULL) {
-      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
+      // If is_conc_safe is false, the object may be undergoing
+      // change by the VM outside a safepoint.  Don't try to
+      // scan it, but rather leave it for the remark phase.
+      if (CMSPermGenPrecleaningEnabled &&
+          (!p->is_conc_safe() || !p->is_parsable())) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
         assert(size == 0, "Initial value");
@@ -7001,7 +7015,6 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
       _mut->clear_range(mr);
     }
   DEBUG_ONLY(})
-
   // Note: the finger doesn't advance while we drain
   // the stack below.
   PushOrMarkClosure pushOrMarkClosure(_collector,
@@ -8062,9 +8075,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
 #ifdef DEBUG
     if (oop(addr)->klass_or_null() != NULL &&
         (   !_collector->should_unload_classes()
-         || oop(addr)->is_parsable())) {
+         || (oop(addr)->is_parsable() &&
+             oop(addr)->is_conc_safe()))) {
       // Ignore mark word because we are running concurrent with mutators
       assert(oop(addr)->is_oop(true), "live block should be an oop");
+      // is_conc_safe is checked before performing this assertion
+      // because an object that is not is_conc_safe may not yet have
+      // the return from size() correct.
       assert(size ==
              CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
              "P-mark and computed size do not agree");
@@ -8077,6 +8094,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
+    // Note that there are objects used during class redefinition
+    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
+    // which are discarded with their is_conc_safe state still
+    // false.  These objects may be floating garbage so may be
+    // seen here.  If they are floating garbage their size
+    // should be attainable from their klass.  Do not assume that
+    // is_conc_safe() is true for oop(addr).
     // Ignore mark word because we are running concurrent with mutators
     assert(oop(addr)->is_oop(true), "live block should be an oop");
     // Verify that the bit map has no bits marked between
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index 44ef14e8db329ec37f41f35e4f33c8a494c65046..dacf5873a7faedf296851e6ed08bdba4eac04384 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -1212,6 +1212,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // More iteration support
   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
   virtual void oop_iterate(OopClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
   virtual void object_iterate(ObjectClosure* cl);
 
   // Need to declare the full complement of closures, whether we'll
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 3aa19ccb85d7e756af270b44df3ceb4177208403..ba3244ef7b757821910e4aaf6cf13c7add61b0c3 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1285,7 +1285,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _surviving_young_words(NULL)
+  _surviving_young_words(NULL),
+  _in_cset_fast_test(NULL),
+  _in_cset_fast_test_base(NULL)
 {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@@ -2485,6 +2487,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
 
+    guarantee(_in_cset_fast_test == NULL, "invariant");
+    guarantee(_in_cset_fast_test_base == NULL, "invariant");
+    _in_cset_fast_test_length = n_regions();
+    _in_cset_fast_test_base =
+                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+    memset(_in_cset_fast_test_base, false,
+                                     _in_cset_fast_test_length * sizeof(bool));
+    // We're biasing _in_cset_fast_test to avoid subtracting the
+    // beginning of the heap every time we want to index; basically
+    // it's the same as what we do with the card table.
+    _in_cset_fast_test = _in_cset_fast_test_base -
+              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
 #if SCAN_ONLY_VERBOSE
     _young_list->print();
 #endif // SCAN_ONLY_VERBOSE
@@ -2553,6 +2568,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
       free_collection_set(g1_policy()->collection_set());
       g1_policy()->clear_collection_set();
 
+      FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+      // this is more for peace of mind; we're nulling them here and
+      // we're expecting them to be null at the beginning of the next GC
+      _in_cset_fast_test = NULL;
+      _in_cset_fast_test_base = NULL;
+
       if (popular_region != NULL) {
         // We have to wait until now, because we don't want the region to
         // be rescheduled for pop-evac during RS update.
@@ -3560,6 +3581,9 @@ public:
   size_t undo_waste()                            { return _undo_waste; }
 
   void push_on_queue(oop* ref) {
+    assert(ref != NULL, "invariant");
+    assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
+
     if (!refs()->push(ref)) {
       overflowed_refs()->push(ref);
       IF_G1_DETAILED_STATS(note_overflow_push());
@@ -3572,6 +3596,10 @@ public:
     if (!refs()->pop_local(ref)) {
       ref = NULL;
     } else {
+      assert(ref != NULL, "invariant");
+      assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
+             "invariant");
+
       IF_G1_DETAILED_STATS(note_pop());
     }
   }
@@ -3601,8 +3629,7 @@ public:
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
-    }
-    else {
+    } else {
       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
     }
     return obj;
@@ -3695,24 +3722,57 @@ public:
     }
   }
 
+private:
+  void deal_with_reference(oop* ref_to_scan) {
+    if (has_partial_array_mask(ref_to_scan)) {
+      _partial_scan_cl->do_oop_nv(ref_to_scan);
+    } else {
+      // Note: we can use "raw" versions of "region_containing" because
+      // "obj_to_scan" is definitely in the heap, and is not in a
+      // humongous region.
+      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+      _evac_cl->set_region(r);
+      _evac_cl->do_oop_nv(ref_to_scan);
+    }
+  }
+
+public:
   void trim_queue() {
+    // I've split the loop in two: the first drains the overflow
+    // queue, the second drains the task queue.  This is better than
+    // having a single loop, which checks both conditions and, inside
+    // it, either pops the overflow queue or the task queue, as each
+    // loop is tighter.  Also, the decision to drain the overflow queue
+    // first is not arbitrary, as the overflow queue is not visible
+    // to the other workers, whereas the task queue is.  So, we want to
+    // drain the "invisible" entries first, while allowing the other
+    // workers to potentially steal the "visible" entries.
+    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      oop *ref_to_scan = NULL;
-      if (overflowed_refs_to_scan() == 0) {
-        pop_from_queue(ref_to_scan);
-      } else {
+      while (overflowed_refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
         pop_from_overflow_queue(ref_to_scan);
+        assert(ref_to_scan != NULL, "invariant");
+        // We shouldn't have pushed it on the queue if it was not
+        // pointing into the CSet.
+        assert(has_partial_array_mask(ref_to_scan) ||
+               _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+        deal_with_reference(ref_to_scan);
       }
-      if (ref_to_scan != NULL) {
-        if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) {
-          _partial_scan_cl->do_oop_nv(ref_to_scan);
-        } else {
-          // Note: we can use "raw" versions of "region_containing" because
-          // "obj_to_scan" is definitely in the heap, and is not in a
-          // humongous region.
-          HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-          _evac_cl->set_region(r);
-          _evac_cl->do_oop_nv(ref_to_scan);
+
+      while (refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
+        pop_from_queue(ref_to_scan);
+
+        if (ref_to_scan != NULL) {
+          // We shouldn't have pushed it on the queue if it was not
+          // pointing into the CSet.
+          assert(has_partial_array_mask(ref_to_scan) ||
+                 _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+          deal_with_reference(ref_to_scan);
+        }
       }
     }
   }
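To make the scheduling argument of that comment concrete, here is a small stand-alone sketch of the drain order, under assumed minimal types (a private overflow vector and a stealable deque; this is not the HotSpot task-queue API):

#include <deque>
#include <vector>

struct Task { int ref; };

struct Worker {
  std::vector<Task> overflow;  // private: other workers cannot steal from it
  std::deque<Task>  queue;     // shared: thieves take from the opposite end

  void process(const Task&) { /* scan the reference */ }

  void trim_queue() {
    while (!queue.empty() || !overflow.empty()) {
      // Tight loop 1: drain the entries only we can see, so the
      // stealable ones stay available to idle workers meanwhile.
      while (!overflow.empty()) {
        Task t = overflow.back();
        overflow.pop_back();
        process(t);
      }
      // Tight loop 2: drain our end of the stealable queue.
      while (!queue.empty()) {
        Task t = queue.front();
        queue.pop_front();
        process(t);
      }
    }
  }
};

Draining the private list first keeps the shared deque populated for work stealing, which is exactly the trade-off the comment above describes.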
@@ -3728,16 +3788,25 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState*
 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
 void G1ParScanClosure::do_oop_nv(oop* p) {
   oop obj = *p;
+
   if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        _par_scan_state->push_on_queue(p);
-        return;
-      }
+    if (_g1->in_cset_fast_test(obj)) {
+      // We're not going to even bother checking whether the object is
+      // already forwarded or not, as this usually causes an immediate
+      // stall. We'll try to prefetch the object (for write, given that
+      // we might need to install the forwarding reference) and we'll
+      // get back to it when we pop it from the queue
+      Prefetch::write(obj->mark_addr(), 0);
+      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
+
+      // slightly paranoid test; I'm trying to catch potential
+      // problems before we go into push_on_queue to know where the
+      // problem is coming from
+      assert(obj == *p, "the value of *p should not have changed");
+      _par_scan_state->push_on_queue(p);
+    } else {
+      _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
-    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
   }
 }
@@ -3777,13 +3846,36 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
     return _g1->handle_evacuation_failure_par(cl, old);
   }
 
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
   oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-    obj->set_mark(m);
     if (g1p->track_object_age(alloc_purpose)) {
-      obj->incr_age();
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (it could be...) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // in this case, we have to install the mark word first,
+        // otherwise obj looks to be forwarded (the old mark word,
+        // which contains the forward pointer, was copied)
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+      }
     }
+    obj->set_mark(m);
+
     // preserve "next" mark bit
     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
       if (!use_local_bitmaps ||
@@ -3805,9 +3897,11 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK));
+      _par_scan_state->push_on_queue(set_partial_array_mask(old));
     } else {
-      _scanner->set_region(_g1->heap_region_containing(obj));
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner->set_region(_g1->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(_scanner);
     }
   } else {
@@ -3817,47 +3911,55 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
   return obj;
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) {
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
+void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
   oop obj = *p;
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
-  if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
+  // The only time we skip the cset test is when we're scanning
+  // references popped from the queue. And we only push on the queue
+  // references that we know point into the cset, so no point in
+  // checking again. But we'll leave an assert here for peace of mind.
+  assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
+
+  // here the null check is implicit in the cset_fast_test() test
+  if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
 #if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.",
-                             p, (void*) obj);
+    gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
+                           "into CS.", p, (void*) obj);
 #endif
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = copy_to_survivor_space(obj);
-      }
-      // When scanning the RS, we only care about objs in CS.
-      if (barrier == G1BarrierRS) {
-        _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
-      }
+    if (obj->is_forwarded()) {
+      *p = obj->forwardee();
+    } else {
+      *p = copy_to_survivor_space(obj);
     }
-    // When scanning moved objs, must look at all oops.
-    if (barrier == G1BarrierEvac) {
+    // When scanning the RS, we only care about objs in CS.
+    if (barrier == G1BarrierRS) {
       _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
+  }
 
-    if (do_gen_barrier) {
-      par_do_barrier(p);
-    }
+  // When scanning moved objs, must look at all oops.
+  if (barrier == G1BarrierEvac && obj != NULL) {
+    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+  }
+
+  if (do_gen_barrier && obj != NULL) {
+    par_do_barrier(p);
   }
 }
 
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
 
-template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
+template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
   oop obj, int start, int end) {
   // process our set of indices (include header in first chunk)
   assert(start < end, "invariant");
   T* const base      = (T*)objArrayOop(obj)->base();
-  T* const start_addr = base + start;
+  T* const start_addr = (start == 0) ? (T*) obj : base + start;
   T* const end_addr   = base + end;
   MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
   _scanner.set_region(_g1->heap_region_containing(obj));
@@ -3866,7 +3968,8 @@ template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
 
 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
   assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
-  oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK);
+  assert(has_partial_array_mask(p), "invariant");
+  oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
@@ -3884,7 +3987,7 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK));
+    _par_scan_state->push_on_queue(set_partial_array_mask(old));
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
@@ -3893,11 +3996,6 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
 
   // process our set of indices (include header in first chunk)
   process_array_chunk<oop>(obj, start, end);
-  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
-  oop* end_addr   = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
 }
 
 int G1ScanAndBalanceClosure::_nq = 0;
@@ -3931,6 +4029,13 @@ public:
                                          pss->hash_seed(),
                                          ref_to_scan)) {
           IF_G1_DETAILED_STATS(pss->note_steal());
+
+          // slightly paranoid tests; I'm trying to catch potential
+          // problems before we go into push_on_queue to know where the
+          // problem is coming from
+          assert(ref_to_scan != NULL, "invariant");
+          assert(has_partial_array_mask(ref_to_scan) ||
+                 _g1h->obj_in_cs(*ref_to_scan), "invariant");
           pss->push_on_queue(ref_to_scan);
           continue;
         }
@@ -3976,10 +4081,10 @@ public:
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState pss(_g1h, i);
-    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index e83b1a7de1c70f4dec4ac878a1b5d24850eaf95b..738b4be04ec04ff69ebc71525c321452197951eb 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -247,6 +247,27 @@ private:
   NumberSeq _pop_obj_rc_at_copy;
   void print_popularity_summary_info() const;
 
+  // This is used for a quick test on whether a reference points into
+  // the collection set or not. Basically, we have an array, with one
+  // byte per region, and that byte denotes whether the corresponding
+  // region is in the collection set or not. The entry corresponding
+  // to the bottom of the heap, i.e., region 0, is pointed to by
+  // _in_cset_fast_test_base. The _in_cset_fast_test field has been
+  // biased so that it actually points to address 0 of the address
+  // space, to make the test as fast as possible (we can simply shift
+  // the address to index into it, instead of having to subtract the
+  // bottom of the heap from the address before shifting it; basically
+  // it works in the same way the card table works).
+  bool* _in_cset_fast_test;
+
+  // The allocated array used for the fast test on whether a reference
+  // points into the collection set or not. This field is also used to
+  // free the array.
+  bool* _in_cset_fast_test_base;
+
+  // The length of the _in_cset_fast_test_base array.
+  size_t _in_cset_fast_test_length;
+
   volatile unsigned _gc_time_stamp;
 
   size_t* _surviving_young_words;
@@ -368,6 +389,38 @@ public:
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
+  // We register a region with the fast "in collection set" test. We
+  // simply set to true the array slot corresponding to this region.
+  void register_region_with_in_cset_fast_test(HeapRegion* r) {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    assert(r->in_collection_set(), "invariant");
+    int index = r->hrs_index();
+    assert(0 <= index && (size_t) index < _in_cset_fast_test_length,
+           "invariant");
+    assert(!_in_cset_fast_test_base[index], "invariant");
+    _in_cset_fast_test_base[index] = true;
+  }
+
+  // This is a fast test on whether a reference points into the
+  // collection set or not. It does not assume that the reference
+  // points into the heap; if it doesn't, it will return false.
+  bool in_cset_fast_test(oop obj) {
+    assert(_in_cset_fast_test != NULL, "sanity");
+    if (_g1_committed.contains((HeapWord*) obj)) {
+      // no need to subtract the bottom of the heap from obj,
+      // _in_cset_fast_test is biased
+      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+      bool ret = _in_cset_fast_test[index];
+      // let's make sure the result is consistent with what the slower
+      // test returns
+      assert( ret || !obj_in_cs(obj), "sanity");
+      assert(!ret ||  obj_in_cs(obj), "sanity");
+      return ret;
+    } else {
+      return false;
+    }
+  }
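The biasing described above is worth seeing in isolation. The sketch below is hypothetical (none of these names are G1's; it assumes a 64-bit build and a region-aligned heap base, and it deliberately relies on the same out-of-range pointer arithmetic HotSpot itself uses for the card table): the table pointer is offset once at construction, so a membership query is just a shift and an indexed load.

#include <cassert>
#include <cstddef>

const size_t kLogRegionBytes = 20;   // assume 1 MB regions

struct FastCSetTest {
  bool* base;     // one entry per region; owns the allocation
  bool* biased;   // base - (heap_start >> kLogRegionBytes)

  FastCSetTest(size_t heap_start, size_t n_regions) {
    base = new bool[n_regions]();    // all false initially
    // Bias once; afterwards queries need no subtraction of the heap bottom.
    biased = base - (heap_start >> kLogRegionBytes);
  }
  ~FastCSetTest() { delete[] base; }

  void register_region(size_t index) { base[index] = true; }

  // addr must lie within the heap; the caller checks that first, just
  // as in_cset_fast_test() above checks _g1_committed.contains().
  bool contains(size_t addr) const {
    return biased[addr >> kLogRegionBytes];
  }
};

int main() {
  const size_t heap_start = (size_t) 1 << 32;  // region-aligned toy heap base
  FastCSetTest t(heap_start, 8);
  t.register_region(3);
  size_t addr = heap_start + 3 * ((size_t) 1 << kLogRegionBytes) + 123;
  assert(t.contains(addr));
  assert(!t.contains(heap_start));   // region 0 was not registered
  return 0;
}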
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -850,6 +903,7 @@ public:
 
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
 
   // Iterate over all objects allocated since the last collection, calling
   // "cl.do_object" on each. The heap must have been initialized properly
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index 8cafe3d98850486338d58927e81f0adb97707638..4d88ee4cbb60ed0deab6c266124fe2d50b918df7 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -36,8 +36,11 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
 
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  HeapRegion* res = _hrs->addr_to_region(addr);
-  assert(res != NULL, "addr outside of heap?");
+  assert(_g1_reserved.contains(addr), "invariant");
+  size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start())
+                                              >> HeapRegion::LogOfHRGrainBytes;
+  HeapRegion* res = _hrs->at(index);
+  assert(res == _hrs->addr_to_region(addr), "sanity");
   return res;
 }
 
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index a5d0165bb4d779b5ec0d45f42b3fd50dc2f4c29e..e467e4d807088ce4e9794123a7d63fd59a7c0090 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2985,6 +2985,7 @@ add_to_collection_set(HeapRegion* hr) {
   _collection_set = hr;
   _collection_set_size++;
   _collection_set_bytes_used_before += hr->used();
+  _g1->register_region_with_in_cset_fast_test(hr);
 }
 
 void
diff --git a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
index 58653196a3608064b4ac4d3bef379363e8548eaa..2791bf6ee6bce230690930f5429780476dfefece 100644
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
@@ -77,6 +77,18 @@ public:
 
 #define G1_PARTIAL_ARRAY_MASK 1
 
+inline bool has_partial_array_mask(oop* ref) {
+  return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
+}
+
+inline oop* set_partial_array_mask(oop obj) {
+  return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
+}
+
+inline oop clear_partial_array_mask(oop* ref) {
+  return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
+}
+
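Since these three helpers are plain low-bit pointer tagging, here is the same idea as a stand-alone, hypothetical C++ sketch (the names are not HotSpot's); it assumes the tagged pointers are at least 2-byte aligned so bit 0 is free:

#include <cassert>
#include <cstdint>

// bit 0 is free because the tagged pointers are at least 2-byte aligned
const std::uintptr_t kPartialArrayMask = 1;

inline int* set_tag(int* p) {
  return (int*) ((std::uintptr_t) p | kPartialArrayMask);
}
inline bool has_tag(int* p) {
  return ((std::uintptr_t) p & kPartialArrayMask) != 0;
}
inline int* clear_tag(int* p) {
  return (int*) ((std::uintptr_t) p & ~kPartialArrayMask);
}

int main() {
  static int value = 7;          // int is at least 2-byte aligned
  int* tagged = set_tag(&value);
  assert(has_tag(tagged));
  assert(clear_tag(tagged) == &value);
  assert(*clear_tag(tagged) == 7);
  assert(!has_tag(&value));
  return 0;
}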
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
   template <class T> void process_array_chunk(oop obj, int start, int end);
@@ -101,7 +113,8 @@ public:
     G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
   void do_oop_work(oop* p);
@@ -119,14 +132,22 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierNone, true>  G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, true>  G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true>  G1ParScanAndMarkHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
+typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
+// This is the only case when we set skip_cset_test. Basically, this
+// closure is (or should be?) only called directly while we're draining
+// the overflow and task queues. In that case we know that the
+// reference in question points into the collection set, otherwise we
+// would not have pushed it on the queue.
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
+// We need a separate closure to handle references during evacuation
+// failure processing, as it cannot assume that the reference already
+// points to the collection set (like G1ParScanHeapEvacClosure does).
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, false>
+                                              G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
   G1CollectedHeap* _g1;
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 72a684812d5bc5b2d1c4b75a4364e59a247d3e3a..e7e9d7363d61b0287453bf56fdaf13801c94b290 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -28,7 +28,7 @@
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                 \
-  product(intx, ParallelGCG1AllocBufferSize, 4*K,               \
+  product(intx, ParallelGCG1AllocBufferSize, 8*K,               \
           "Size of parallel G1 allocation buffers in to-space.") \
                                                                 \
   product(intx, G1TimeSliceMS, 500,                             \
diff --git a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp
index 39f72a616b7748e8f2c6b66dc63357461bfbd543..4cfb76464e44192cdbea73fa1307b593f89edb95 100644
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp
@@ -32,11 +32,13 @@ enum G1Barrier {
   G1BarrierNone, G1BarrierRS, G1BarrierEvac
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure;
 class G1ParScanClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
+                                                      G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
diff --git a/src/share/vm/gc_implementation/includeDB_gc_g1 b/src/share/vm/gc_implementation/includeDB_gc_g1
index 8aeaf9e5e40ab828d07b26a9c0148fd1f0fd174b..375154d89a568a8624b421aeb94e88142347bd4b 100644
--- a/src/share/vm/gc_implementation/includeDB_gc_g1
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1
@@ -31,7 +31,7 @@ bufferingOopClosure.hpp os.hpp
 cardTableRS.cpp concurrentMark.hpp
 cardTableRS.cpp g1SATBCardTableModRefBS.hpp
 
-collectionSetChooser.cpp g1CollectedHeap.hpp
+collectionSetChooser.cpp g1CollectedHeap.inline.hpp
 collectionSetChooser.cpp g1CollectorPolicy.hpp
 collectionSetChooser.cpp collectionSetChooser.hpp
 collectionSetChooser.cpp space.inline.hpp
@@ -43,7 +43,7 @@ concurrentG1Refine.cpp atomic.hpp
 concurrentG1Refine.cpp concurrentG1Refine.hpp
 concurrentG1Refine.cpp concurrentG1RefineThread.hpp
 concurrentG1Refine.cpp copy.hpp
-concurrentG1Refine.cpp g1CollectedHeap.hpp
+concurrentG1Refine.cpp g1CollectedHeap.inline.hpp
 concurrentG1Refine.cpp g1RemSet.hpp
 concurrentG1Refine.cpp space.inline.hpp
 
@@ -51,7 +51,7 @@ concurrentG1Refine.hpp globalDefinitions.hpp
 concurrentG1RefineThread.cpp concurrentG1Refine.hpp
 concurrentG1RefineThread.cpp concurrentG1RefineThread.hpp
-concurrentG1RefineThread.cpp g1CollectedHeap.hpp
+concurrentG1RefineThread.cpp g1CollectedHeap.inline.hpp
 concurrentG1RefineThread.cpp g1CollectorPolicy.hpp
 concurrentG1RefineThread.cpp handles.inline.hpp
 concurrentG1RefineThread.cpp mutexLocker.hpp
@@ -168,7 +168,7 @@ g1CollectorPolicy.cpp concurrentMark.hpp
 g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp
 g1CollectorPolicy.cpp debug.hpp
 g1CollectorPolicy.cpp java.hpp
-g1CollectorPolicy.cpp g1CollectedHeap.hpp
+g1CollectorPolicy.cpp g1CollectedHeap.inline.hpp
 g1CollectorPolicy.cpp g1CollectorPolicy.hpp
 g1CollectorPolicy.cpp heapRegionRemSet.hpp
 g1CollectorPolicy.cpp mutexLocker.hpp
@@ -189,7 +189,7 @@ g1MarkSweep.cpp biasedLocking.hpp
 g1MarkSweep.cpp codeCache.hpp
 g1MarkSweep.cpp events.hpp
 g1MarkSweep.cpp fprofiler.hpp
-g1MarkSweep.hpp g1CollectedHeap.hpp
+g1MarkSweep.hpp g1CollectedHeap.inline.hpp
 g1MarkSweep.cpp g1MarkSweep.hpp
 g1MarkSweep.cpp gcLocker.hpp
 g1MarkSweep.cpp genCollectedHeap.hpp
@@ -285,7 +285,7 @@ heapRegionRemSet.cpp globalDefinitions.hpp
 heapRegionRemSet.cpp space.inline.hpp
 
 heapRegionSeq.cpp allocation.hpp
-heapRegionSeq.cpp g1CollectedHeap.hpp
+heapRegionSeq.cpp g1CollectedHeap.inline.hpp
 heapRegionSeq.cpp heapRegionSeq.hpp
 
 heapRegionSeq.hpp growableArray.hpp
@@ -336,18 +336,18 @@ specialized_oop_closures.hpp g1_specialized_oop_closures.hpp
 
 survRateGroup.hpp numberSeq.hpp
 
 survRateGroup.cpp allocation.hpp
-survRateGroup.cpp g1CollectedHeap.hpp
+survRateGroup.cpp g1CollectedHeap.inline.hpp
 survRateGroup.cpp g1CollectorPolicy.hpp
 survRateGroup.cpp heapRegion.hpp
 survRateGroup.cpp survRateGroup.hpp
 
 thread.cpp concurrentMarkThread.inline.hpp
 
-universe.cpp g1CollectedHeap.hpp
+universe.cpp g1CollectedHeap.inline.hpp
 universe.cpp g1CollectorPolicy.hpp
 
 vm_operations_g1.hpp vmGCOperations.hpp
 
 vm_operations_g1.cpp vm_operations_g1.hpp
-vm_operations_g1.cpp g1CollectedHeap.hpp
+vm_operations_g1.cpp g1CollectedHeap.inline.hpp
 vm_operations_g1.cpp isGCActiveMark.hpp
diff --git a/src/share/vm/gc_implementation/includeDB_gc_shared b/src/share/vm/gc_implementation/includeDB_gc_shared
index 7ea2265898d30aed4fc0897dfb0bf7878792a596..99ce759a474e90d0eb9cf657ae2d3ee688660170 100644
--- a/src/share/vm/gc_implementation/includeDB_gc_shared
+++ b/src/share/vm/gc_implementation/includeDB_gc_shared
@@ -100,4 +100,4 @@ spaceCounters.hpp mutableSpace.hpp
 spaceCounters.hpp perfData.hpp
 spaceCounters.hpp generationCounters.hpp
 
-vmGCOperations.cpp g1CollectedHeap.hpp
+vmGCOperations.cpp g1CollectedHeap.inline.hpp
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index 087cdf93c017881c54036cff7c802a9e97e9e3ef..c268b6a2f86737e85e700a6e816a529d5b2a3405 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -200,6 +200,7 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   void oop_iterate(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
   void permanent_oop_iterate(OopClosure* cl);
   void permanent_object_iterate(ObjectClosure* cl);
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index 13d30c70b6dfecfb17bbe626f896b5515d94b382..41d80a3a90524d94f3de1cdb8c9a0db6d886935c 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -466,6 +466,10 @@ class CollectedHeap : public CHeapObj {
   // This includes objects in permanent memory.
   virtual void object_iterate(ObjectClosure* cl) = 0;
 
+  // Similar to object_iterate() except iterates only
+  // over live objects.
+  virtual void safe_object_iterate(ObjectClosure* cl) = 0;
+
   // Behaves the same as oop_iterate, except only traverses
   // interior pointers contained in permanent memory. If there
   // is no permanent memory, does nothing.
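For readers unfamiliar with the closure idiom this interface family relies on, a minimal model follows. SimpleHeap, Obj, ObjectClosureModel, and Counter are hypothetical stand-ins for CollectedHeap and ObjectClosure, not HotSpot types; safe_object_iterate() differs from object_iterate() only by a per-object liveness filter.

#include <cstdio>
#include <vector>

struct Obj { bool alive; };

struct ObjectClosureModel {
  virtual void do_object(Obj* o) = 0;
  virtual ~ObjectClosureModel() {}
};

struct SimpleHeap {
  std::vector<Obj> objs;

  // object_iterate(): visit every object, live or not.
  void object_iterate(ObjectClosureModel* cl) {
    for (size_t i = 0; i < objs.size(); i++) cl->do_object(&objs[i]);
  }
  // safe_object_iterate(): visit only objects passing a liveness test,
  // mirroring the contract added to CollectedHeap above.
  void safe_object_iterate(ObjectClosureModel* cl) {
    for (size_t i = 0; i < objs.size(); i++) {
      if (objs[i].alive) cl->do_object(&objs[i]);
    }
  }
};

struct Counter : ObjectClosureModel {
  int n;
  Counter() : n(0) {}
  virtual void do_object(Obj*) { n++; }
};

int main() {
  SimpleHeap h;
  Obj live1 = { true }, dead = { false }, live2 = { true };
  h.objs.push_back(live1);
  h.objs.push_back(dead);
  h.objs.push_back(live2);
  Counter c;
  h.safe_object_iterate(&c);
  std::printf("%d live objects\n", c.n);   // prints 2
  return 0;
}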
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index 832a3331e1999679e8928ab258d910c1b93ba962..41dfac1b5c883a53e083b445f306e85f930b39c0 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -910,6 +910,13 @@ void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
   perm_gen()->object_iterate(cl);
 }
 
+void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
+  for (int i = 0; i < _n_gens; i++) {
+    _gens[i]->safe_object_iterate(cl);
+  }
+  perm_gen()->safe_object_iterate(cl);
+}
+
 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate_since_last_GC(cl);
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index f9a4d50875397f569ba32d4333b95a4727279159..3ab20cfd19ff133c5190ab4d51d8eb68a9c3a42b 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -215,6 +215,7 @@ public:
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl);
   void object_iterate_since_last_GC(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;
diff --git a/src/share/vm/memory/generation.cpp b/src/share/vm/memory/generation.cpp
index 5167db243b6f83b0eaa34dc2de0b9176fffc5302..d7b1d9f871dedc53b56be4d388870de0e41b79ed 100644
--- a/src/share/vm/memory/generation.cpp
+++ b/src/share/vm/memory/generation.cpp
@@ -319,6 +319,21 @@ void Generation::object_iterate(ObjectClosure* cl) {
   space_iterate(&blk);
 }
 
+class GenerationSafeObjIterateClosure : public SpaceClosure {
+ private:
+  ObjectClosure* _cl;
+ public:
+  virtual void do_space(Space* s) {
+    s->safe_object_iterate(_cl);
+  }
+  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
+};
+
+void Generation::safe_object_iterate(ObjectClosure* cl) {
+  GenerationSafeObjIterateClosure blk(cl);
+  space_iterate(&blk);
+}
+
 void Generation::prepare_for_compaction(CompactPoint* cp) {
   // Generic implementation, can be specialized
   CompactibleSpace* space = first_compaction_space();
diff --git a/src/share/vm/memory/generation.hpp b/src/share/vm/memory/generation.hpp
index 0f0e74e4794b0896d81d54794b9b8fb22c080760..206949901e4b2b8271ae06d84b7caf845d22eec6 100644
--- a/src/share/vm/memory/generation.hpp
+++ b/src/share/vm/memory/generation.hpp
@@ -518,6 +518,11 @@ class Generation: public CHeapObj {
   // each.
   virtual void object_iterate(ObjectClosure* cl);
 
+  // Iterate over all safe objects in the generation, calling "cl.do_object" on
+  // each.  An object is safe if its references point to other objects in
+  // the heap.  This defaults to object_iterate() unless overridden.
+  virtual void safe_object_iterate(ObjectClosure* cl);
+
   // Iterate over all objects allocated in the generation since the last
   // collection, calling "cl.do_object" on each.  The generation must have
   // been initialized properly to support this function, or else this call
diff --git a/src/share/vm/memory/heapInspection.cpp b/src/share/vm/memory/heapInspection.cpp
index e3d6fbd7b5a57080ef8e674df71abb0e4f5ed835..3bc17bea6008e4cc993008e4819a3099fdbe885f 100644
--- a/src/share/vm/memory/heapInspection.cpp
+++ b/src/share/vm/memory/heapInspection.cpp
@@ -263,6 +263,9 @@ void HeapInspection::heap_inspection(outputStream* st) {
   if (!cit.allocation_failed()) {
     // Iterate over objects in the heap
     RecordInstanceClosure ric(&cit);
+    // If this operation encounters a bad object when using CMS,
+    // consider using safe_object_iterate() which avoids perm gen
+    // objects that may contain bad references.
    Universe::heap()->object_iterate(&ric);
 
     // Report if certain classes are not counted because of
@@ -317,5 +320,8 @@ void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>*
 
   // Iterate over objects in the heap
   FindInstanceClosure fic(k, result);
+  // If this operation encounters a bad object when using CMS,
+  // consider using safe_object_iterate() which avoids perm gen
+  // objects that may contain bad references.
   Universe::heap()->object_iterate(&fic);
 }
diff --git a/src/share/vm/memory/oopFactory.cpp b/src/share/vm/memory/oopFactory.cpp
index e9ea08d3b23f0ee6b8f903f2f5d88712d8f41a20..24c67634e8355f96d6555207ef11e2e95042332d 100644
--- a/src/share/vm/memory/oopFactory.cpp
+++ b/src/share/vm/memory/oopFactory.cpp
@@ -82,9 +82,11 @@ objArrayOop oopFactory::new_system_objArray(int length, TRAPS) {
 }
 
 
-constantPoolOop oopFactory::new_constantPool(int length, TRAPS) {
+constantPoolOop oopFactory::new_constantPool(int length,
+                                             bool is_conc_safe,
+                                             TRAPS) {
   constantPoolKlass* ck = constantPoolKlass::cast(Universe::constantPoolKlassObj());
-  return ck->allocate(length, CHECK_NULL);
+  return ck->allocate(length, is_conc_safe, CHECK_NULL);
 }
 
 
@@ -105,11 +107,13 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size,
                                            int compressed_line_number_size,
                                            int localvariable_table_length,
                                            int checked_exceptions_length,
+                                           bool is_conc_safe,
                                            TRAPS) {
   klassOop cmkObj = Universe::constMethodKlassObj();
   constMethodKlass* cmk = constMethodKlass::cast(cmkObj);
   return cmk->allocate(byte_code_size, compressed_line_number_size,
                        localvariable_table_length, checked_exceptions_length,
+                       is_conc_safe,
                        CHECK_NULL);
 }
 
@@ -117,14 +121,17 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size,
 methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
                                  int compressed_line_number_size,
                                  int localvariable_table_length,
-                                 int checked_exceptions_length, TRAPS) {
+                                 int checked_exceptions_length,
+                                 bool is_conc_safe,
+                                 TRAPS) {
   methodKlass* mk = methodKlass::cast(Universe::methodKlassObj());
   assert(!access_flags.is_native() || byte_code_size == 0,
          "native methods should not contain byte codes");
   constMethodOop cm = new_constMethod(byte_code_size,
                                       compressed_line_number_size,
                                       localvariable_table_length,
-                                      checked_exceptions_length, CHECK_NULL);
+                                      checked_exceptions_length,
+                                      is_conc_safe, CHECK_NULL);
   constMethodHandle rw(THREAD, cm);
   return mk->allocate(rw, access_flags, CHECK_NULL);
 }
diff --git a/src/share/vm/memory/oopFactory.hpp b/src/share/vm/memory/oopFactory.hpp
index 855e0fcf43e5af2d3c6b462a817315e4a4f01f0a..e5a80073bafe22ad6f978f2d348f091343a1bd3b 100644
--- a/src/share/vm/memory/oopFactory.hpp
+++ b/src/share/vm/memory/oopFactory.hpp
@@ -81,7 +81,9 @@ class oopFactory: AllStatic {
   static symbolHandle new_symbol_handle(const char* name, TRAPS) { return new_symbol_handle(name, (int)strlen(name), CHECK_(symbolHandle())); }
 
   // Constant pools
-  static constantPoolOop new_constantPool (int length, TRAPS);
+  static constantPoolOop new_constantPool (int length,
+                                           bool is_conc_safe,
+                                           TRAPS);
   static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);
 
   // Instance classes
@@ -93,9 +95,20 @@ private:
   static constMethodOop new_constMethod(int byte_code_size,
                                         int compressed_line_number_size,
                                         int localvariable_table_length,
-                                        int checked_exceptions_length, TRAPS);
+                                        int checked_exceptions_length,
+                                        bool is_conc_safe,
+                                        TRAPS);
 public:
-  static methodOop new_method(int byte_code_size, AccessFlags access_flags, int compressed_line_number_size, int localvariable_table_length, int checked_exceptions_length, TRAPS);
+  // Set is_conc_safe for methods which cannot safely be
+  // processed by concurrent GC even after the return of
+  // the method.
+  static methodOop new_method(int byte_code_size,
+                              AccessFlags access_flags,
+                              int compressed_line_number_size,
+                              int localvariable_table_length,
+                              int checked_exceptions_length,
+                              bool is_conc_safe,
+                              TRAPS);
 
   // Method Data containers
   static methodDataOop new_methodData(methodHandle method, TRAPS);
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
index 652d585bb8f72539ef5b8156918be5349b348d4e..00f9700138482adb416dd63754e615fba3826fa0 100644
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -569,7 +569,15 @@ void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
   if (prev > mr.start()) {
     region_start_addr = prev;
     blk_start_addr    = prev;
-    assert(blk_start_addr == block_start(region_start_addr), "invariant");
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
   } else {
     region_start_addr = mr.start();
     blk_start_addr    = block_start(region_start_addr);
@@ -705,6 +713,12 @@ void ContiguousSpace::object_iterate(ObjectClosure* blk) {
   object_iterate_from(bm, blk);
 }
 
+// For a contiguous space, object_iterate() and safe_object_iterate()
+// are the same.
+void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
   assert(mark.space() == this, "Mark does not match space");
   HeapWord* p = mark.point();
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
index a102c5f220ceb83a062052dccb99b1ce0a714872..f055638e8dd750a2859910b17478d81e8c189fb4 100644
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -193,6 +193,9 @@ class Space: public CHeapObj {
   // each.  Objects allocated by applications of the closure are not
   // included in the iteration.
   virtual void object_iterate(ObjectClosure* blk) = 0;
+  // Similar to object_iterate() except only iterates over
+  // objects whose internal references point to objects in the space.
+  virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 
   // Iterate over all objects that intersect with mr, calling "cl->do_object"
   // on each.  There is an exception to this: if this closure has already
@@ -843,6 +846,9 @@ class ContiguousSpace: public CompactibleSpace {
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* blk);
+  // For contiguous spaces this method will iterate safely over objects
+  // in the space (i.e., between bottom and top) when at a safepoint.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   // iterates on objects up to the safe limit
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
diff --git a/src/share/vm/oops/constMethodKlass.cpp b/src/share/vm/oops/constMethodKlass.cpp
index 802c4430afa107784ccdb23d3b0d60908f9af9f1..f2fe1706a1649ea63cae5f156ddea692751cf178 100644
--- a/src/share/vm/oops/constMethodKlass.cpp
+++ b/src/share/vm/oops/constMethodKlass.cpp
@@ -49,10 +49,16 @@ bool constMethodKlass::oop_is_parsable(oop obj) const {
   return constMethodOop(obj)->object_is_parsable();
 }
 
+bool constMethodKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constMethod(), "must be constMethod oop");
+  return constMethodOop(obj)->is_conc_safe();
+}
+
 constMethodOop constMethodKlass::allocate(int byte_code_size,
                                           int compressed_line_number_size,
                                           int localvariable_table_length,
                                           int checked_exceptions_length,
+                                          bool is_conc_safe,
                                           TRAPS) {
 
   int size = constMethodOopDesc::object_size(byte_code_size,
@@ -75,6 +81,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
                               compressed_line_number_size,
                               localvariable_table_length);
   assert(cm->size() == size, "wrong size for object");
+  cm->set_is_conc_safe(is_conc_safe);
   cm->set_partially_loaded();
   assert(cm->is_parsable(), "Is safely parsable by gc");
   return cm;
diff --git a/src/share/vm/oops/constMethodKlass.hpp b/src/share/vm/oops/constMethodKlass.hpp
index 87fc9c3156d2abd6d7759424f05b62e94e77a8a5..a3f7d9710f75777f2ddab39b0f55dfa491b46e16 100644
--- a/src/share/vm/oops/constMethodKlass.hpp
+++ b/src/share/vm/oops/constMethodKlass.hpp
@@ -32,12 +32,16 @@ public:
   // Testing
   bool oop_is_constMethod() const { return true; }
   virtual bool oop_is_parsable(oop obj) const;
+  virtual bool oop_is_conc_safe(oop obj) const;
+
 
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constMethodKlass);
   constMethodOop allocate(int byte_code_size,
                           int compressed_line_number_size,
                           int localvariable_table_length,
-                          int checked_exceptions_length, TRAPS);
+                          int checked_exceptions_length,
+                          bool is_conc_safe,
+                          TRAPS);
   static klassOop create_klass(TRAPS);
 
   // Sizing
diff --git a/src/share/vm/oops/constMethodOop.hpp b/src/share/vm/oops/constMethodOop.hpp
index e9ffa4b66bcba9a63cc4b514021fb6e3106fda5a..4669e6a852fdffe5d74021823a73f8548b60a3be 100644
--- a/src/share/vm/oops/constMethodOop.hpp
+++ b/src/share/vm/oops/constMethodOop.hpp
@@ -104,6 +104,7 @@ private:
   // loads and stores.  This value may be updated and read without a lock by
   // multiple threads, so is volatile.
   volatile uint64_t _fingerprint;
+  volatile bool     _is_conc_safe; // if true, safe for concurrent GC processing
 
 public:
   oop* oop_block_beg() const { return adr_method(); }
@@ -273,6 +274,8 @@ public:
   oop*  adr_method() const             { return (oop*)&_method;          }
   oop*  adr_stackmap_data() const      { return (oop*)&_stackmap_data;   }
   oop*  adr_exception_table() const    { return (oop*)&_exception_table; }
+  bool is_conc_safe() { return _is_conc_safe; }
+  void set_is_conc_safe(bool v) { _is_conc_safe = v; }
 
   // Unique id for the method
   static const u2 MAX_IDNUM;
diff --git a/src/share/vm/oops/constantPoolKlass.cpp b/src/share/vm/oops/constantPoolKlass.cpp
index 593b74668ff4df3093cf6fbd784a085cad1bbe58..2a17c00e1ceec7b4cb420f1ad7ea5e5c6c47e32f 100644
--- a/src/share/vm/oops/constantPoolKlass.cpp
+++ b/src/share/vm/oops/constantPoolKlass.cpp
@@ -25,7 +25,7 @@
 # include "incls/_precompiled.incl"
 # include "incls/_constantPoolKlass.cpp.incl"
 
-constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
+constantPoolOop constantPoolKlass::allocate(int length, bool is_conc_safe, TRAPS) {
   int size = constantPoolOopDesc::object_size(length);
   KlassHandle klass (THREAD, as_klassOop());
   constantPoolOop c =
@@ -38,6 +38,9 @@ constantPoolOop constantPoolKlass::allocate(int length, bool is_conc_safe, TRAPS
   c->set_flags(0);
   // only set to non-zero if constant pool is merged by RedefineClasses
   c->set_orig_length(0);
+  // if the constant pool may change during RedefineClasses, it is created
+  // unsafe for concurrent GC processing.
+  c->set_is_conc_safe(is_conc_safe);
   // all fields are initialized; needed for GC
 
   // initialize tag array
@@ -207,6 +210,11 @@ int constantPoolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr)
   return size;
 }
 
+bool constantPoolKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constantPool(), "must be constantPool");
+  return constantPoolOop(obj)->is_conc_safe();
+}
+
 #ifndef SERIALGC
 int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert (obj->is_constantPool(), "obj must be constant pool");
diff --git a/src/share/vm/oops/constantPoolKlass.hpp b/src/share/vm/oops/constantPoolKlass.hpp
index 324efbec023413d0d738a73fbbbd3689e2c6e349..2f9efc285efc6a5c89f618a8beb6cbfcfd372f30 100644
--- a/src/share/vm/oops/constantPoolKlass.hpp
+++ b/src/share/vm/oops/constantPoolKlass.hpp
@@ -34,7 +34,7 @@ class constantPoolKlass : public Klass {
 
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constantPoolKlass);
-  constantPoolOop allocate(int length, TRAPS);
+  constantPoolOop allocate(int length, bool is_conc_safe, TRAPS);
   static klassOop create_klass(TRAPS);
 
   // Casting from klassOop
@@ -48,6 +48,8 @@ class constantPoolKlass : public Klass {
   int object_size() const        { return align_object_size(header_size()); }
 
   // Garbage collection
+  // Returns true if the object is safe for concurrent GC processing.
+  virtual bool oop_is_conc_safe(oop obj) const;
   void oop_follow_contents(oop obj);
   int oop_adjust_pointers(oop obj);
diff --git a/src/share/vm/oops/constantPoolOop.hpp b/src/share/vm/oops/constantPoolOop.hpp
index 5627059091f72f334a1ae5e3f03612851f6a5c1b..fd2264c1eab1c6d2a46de01ee8c24f474734b40f 100644
--- a/src/share/vm/oops/constantPoolOop.hpp
+++ b/src/share/vm/oops/constantPoolOop.hpp
@@ -43,6 +43,8 @@ class constantPoolOopDesc : public oopDesc {
   klassOop             _pool_holder;   // the corresponding class
   int                  _flags;         // a few header bits to describe contents for GC
   int                  _length; // number of elements in the array
+  volatile bool        _is_conc_safe; // if true, safe for concurrent
+                                      // GC processing
   // only set to non-zero if constant pool is merged by RedefineClasses
   int                  _orig_length;
 
@@ -379,6 +381,9 @@ class constantPoolOopDesc : public oopDesc {
   static int object_size(int length)   { return align_object_size(header_size() + length); }
   int object_size()                    { return object_size(length()); }
 
+  bool is_conc_safe()                  { return _is_conc_safe; }
+  void set_is_conc_safe(bool v)        { _is_conc_safe = v; }
+
   friend class constantPoolKlass;
   friend class ClassFileParser;
   friend class SystemDictionary;
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index 46852c02ee1da05157f0c31fae9a851609eebf5c..881da970d034b0537a9a855a998ea25b5d4900d2 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -606,8 +606,19 @@ class Klass : public Klass_vtbl {
 #undef assert_same_query
 
   // Unless overridden, oop is parsable if it has a klass pointer.
+  // Parsability of an object is object specific.
   virtual bool oop_is_parsable(oop obj) const { return true; }
 
+  // Unless overridden, an oop is safe for concurrent GC processing
+  // after its allocation is complete.  The exception to
+  // this is the case where objects are changed after allocation.
+  // Class redefinition is one of the known exceptions.  During
+  // class redefinition, an allocated class can be changed in order
+  // to create a merged class (the combination of the old class
+  // definition that has to be preserved and the new class
+  // definition which is being created).
+  virtual bool oop_is_conc_safe(oop obj) const { return true; }
+
   // Access flags
   AccessFlags access_flags() const         { return _access_flags;  }
   void set_access_flags(AccessFlags flags) { _access_flags = flags; }
diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp
index 8a5507cbac8dafb4ab33782e3bcf87f3d8ce6e7f..c239ccf1a2c96acbf883a2f576035dda17d9a8bb 100644
--- a/src/share/vm/oops/methodOop.cpp
+++ b/src/share/vm/oops/methodOop.cpp
@@ -792,15 +792,34 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
   AccessFlags flags = m->access_flags();
   int checked_exceptions_len = m->checked_exceptions_length();
   int localvariable_len = m->localvariable_table_length();
-  methodOop newm_oop = oopFactory::new_method(new_code_length, flags, new_compressed_linenumber_size, localvariable_len, checked_exceptions_len, CHECK_(methodHandle()));
+  // Allocate newm_oop with the is_conc_safe parameter set
+  // to IsUnsafeConc to indicate that newm_oop is not yet
+  // safe for concurrent processing by a GC.
+  methodOop newm_oop = oopFactory::new_method(new_code_length,
+                                              flags,
+                                              new_compressed_linenumber_size,
+                                              localvariable_len,
+                                              checked_exceptions_len,
+                                              IsUnsafeConc,
+                                              CHECK_(methodHandle()));
   methodHandle newm (THREAD, newm_oop);
   int new_method_size = newm->method_size();
   // Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop
   constMethodOop newcm = newm->constMethod();
   int new_const_method_size = newm->constMethod()->object_size();
+
   memcpy(newm(), m(), sizeof(methodOopDesc));
   // Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop
+  // is_conc_safe is set to false because that is the value of
+  // is_conc_safe initialized into newcm and the copy should
+  // not overwrite that value.  During the window during which it is
+  // tagged as unsafe, some extra work could be needed during precleaning
+  // or concurrent marking but those phases will be correct.  Setting and
+  // resetting is done in preference to a careful copying into newcm to
+  // avoid having to know the precise layout of a constMethodOop.
+  m->constMethod()->set_is_conc_safe(false);
   memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc));
+  m->constMethod()->set_is_conc_safe(true);
   // Reset correct method/const method, method size, and parameter info
   newcm->set_method(newm());
   newm->set_constMethod(newcm);
@@ -831,6 +850,10 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
            m->localvariable_table_start(),
            localvariable_len * sizeof(LocalVariableTableElement));
   }
+
+  // Only set is_conc_safe to true when changes to newcm are
+  // complete.
+  newcm->set_is_conc_safe(true);
   return newm;
 }
diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp
index 965181c664d0fa610102f449ccbf148463767684..8b03a68380ad6b0bfc9df0a18754c1918b9a6537 100644
--- a/src/share/vm/oops/methodOop.hpp
+++ b/src/share/vm/oops/methodOop.hpp
@@ -129,6 +129,10 @@ class methodOopDesc : public oopDesc {
   volatile address           _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
 
  public:
+
+  static const bool IsUnsafeConc         = false;
+  static const bool IsSafeConc           = true;
+
   // accessors for instance variables
   constMethodOop constMethod() const             { return _constMethod; }
   void set_constMethod(constMethodOop xconst)    { oop_store_without_check((oop*)&_constMethod, (oop)xconst); }
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
index 07c2ba5ac522c008ad44a85d53afc6ffc5e8485a..ba13ca63c449bab98ab11ff201bcf488fc158ce1 100644
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -108,6 +108,13 @@ class oopDesc {
   // installation of their klass pointer.
   bool is_parsable();
 
+  // Some perm gen objects that have been allocated and initialized
+  // can be changed by the VM when not at a safepoint (class redefinition
+  // is an example).  Such objects should not be examined by the
+  // concurrent processing of a garbage collector if is_conc_safe()
+  // returns false.
+  bool is_conc_safe();
+
   // type test operations (inlined in oop.inline.h)
   bool is_instance()           const;
   bool is_instanceRef()        const;
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
index 0c7a3967f48241fe132a5c9303b115029915835c..9161310de60f70f85e9fc421957511559508e8d8 100644
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -435,6 +435,10 @@ inline bool oopDesc::is_parsable() {
   return blueprint()->oop_is_parsable(this);
 }
 
+inline bool oopDesc::is_conc_safe() {
+  return blueprint()->oop_is_conc_safe(this);
+}
+
 inline void update_barrier_set(void* p, oop v) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   oopDesc::bs()->write_ref_field(p, v);
diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp
index bd8fe2b356b9d993a1909a8d889e4925f9661fd1..4cc6b577b4718ff1d26bd29d9f73e3cb1a8e4303 100644
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp
@@ -1230,8 +1230,14 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
 
   // Constant pools are not easily reused so we allocate a new one
   // each time.
+  // merge_cp is created unsafe for concurrent GC processing.  It
+  // should be marked safe before discarding it because, even if
+  // garbage, it may be scanned if it crosses a card boundary,
+  // in order to find the start of the first complete object on the card.
   constantPoolHandle merge_cp(THREAD,
-    oopFactory::new_constantPool(merge_cp_length, THREAD));
+    oopFactory::new_constantPool(merge_cp_length,
+                                 methodOopDesc::IsUnsafeConc,
+                                 THREAD));
   int orig_length = old_cp->orig_length();
   if (orig_length == 0) {
     // This old_cp is an actual original constant pool. We save
@@ -1274,6 +1280,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
       // rewriting so we can't use the old constant pool with the new
       // class.
 
+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();  // toss the merged constant pool
     } else if (old_cp->length() < scratch_cp->length()) {
       // The old constant pool has fewer entries than the new constant
@@ -1283,6 +1290,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
       // rewriting so we can't use the new constant pool with the old
       // class.
 
+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();  // toss the merged constant pool
     } else {
       // The old constant pool has more entries than the new constant
@@ -1296,6 +1304,7 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
       set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
                             THREAD);
       // drop local ref to the merged constant pool
+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();
     }
   } else {
@@ -1325,7 +1334,10 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
     // GCed.
     set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
                           THREAD);
+    merge_cp()->set_is_conc_safe(true);
   }
+  assert(old_cp()->is_conc_safe(), "Just checking");
+  assert(scratch_cp()->is_conc_safe(), "Just checking");
 
   return JVMTI_ERROR_NONE;
 } // end merge_cp_and_rewrite()
@@ -2314,13 +2326,16 @@ void VM_RedefineClasses::set_new_constant_pool(
     // worst case merge situation. We want to associate the minimum
     // sized constant pool with the klass to save space.
     constantPoolHandle smaller_cp(THREAD,
-      oopFactory::new_constantPool(scratch_cp_length, THREAD));
+      oopFactory::new_constantPool(scratch_cp_length,
+                                   methodOopDesc::IsUnsafeConc,
+                                   THREAD));
     // preserve orig_length() value in the smaller copy
     int orig_length = scratch_cp->orig_length();
     assert(orig_length != 0, "sanity check");
     smaller_cp->set_orig_length(orig_length);
     scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
     scratch_cp = smaller_cp;
+    smaller_cp()->set_is_conc_safe(true);
   }
 
   // attach new constant pool to klass
@@ -2516,6 +2531,7 @@ void VM_RedefineClasses::set_new_constant_pool(
 
     rewrite_cp_refs_in_stack_map_table(method, THREAD);
   } // end for each method
+  assert(scratch_cp()->is_conc_safe(), "Just checking");
 } // end set_new_constant_pool()
diff --git a/src/share/vm/prims/jvmtiTagMap.cpp b/src/share/vm/prims/jvmtiTagMap.cpp
index 152789815af181798f195480362ee4e17e762d6b..dcf83bb22d44b40a675e5518fb15343136302984 100644
--- a/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/src/share/vm/prims/jvmtiTagMap.cpp
@@ -1320,6 +1320,9 @@ class VM_HeapIterateOperation: public VM_Operation {
     }
 
     // do the iteration
+    // If this operation encounters a bad object when using CMS,
+    // consider using safe_object_iterate() which avoids perm gen
+    // objects that may contain bad references.
     Universe::heap()->object_iterate(_blk);
 
     // when sharing is enabled we must iterate over the shared spaces
diff --git a/src/share/vm/services/heapDumper.cpp b/src/share/vm/services/heapDumper.cpp
index bf7aaf1a91440462a3a58a0c8e5f4a94e22cc33a..18bd9f477d76cb02e4ed3b0b1cb7a83c59d44303 100644
--- a/src/share/vm/services/heapDumper.cpp
+++ b/src/share/vm/services/heapDumper.cpp
@@ -1700,7 +1700,7 @@ void VM_HeapDumper::doit() {
   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
   // of the heap dump.
   HeapObjectDumper obj_dumper(this, writer());
-  Universe::heap()->object_iterate(&obj_dumper);
+  Universe::heap()->safe_object_iterate(&obj_dumper);
 
   // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
   do_threads();
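Taken as a whole, the patch implements a small publish protocol around is_conc_safe: allocate the object with the flag false, mutate it outside a safepoint, and set the flag once the object is stable; concurrent GC code checks the flag and defers anything still under construction. Below is a minimal, hypothetical model of that handshake. The names are not HotSpot's, and HotSpot itself uses a plain volatile bool rather than std::atomic; the atomic here just keeps the sketch self-contained and well-defined.

#include <atomic>
#include <cassert>

struct PoolObject {
  std::atomic<bool> is_conc_safe;
  int payload;
  PoolObject() : is_conc_safe(false), payload(0) {}
};

// Mutator side, e.g. building a merged constant pool during
// class redefinition: mutate first, publish the "safe" flag last.
void build(PoolObject& p) {
  p.payload = 42;                                         // arbitrary edits
  p.is_conc_safe.store(true, std::memory_order_release);  // publish
}

// Concurrent GC side: if the object is not yet safe, skip it now and
// leave it for a later phase, as the precleaning code above does by
// redirtying the card and leaving the object for remark.
bool try_scan(const PoolObject& p) {
  if (!p.is_conc_safe.load(std::memory_order_acquire)) {
    return false;   // revisit during remark / at a safepoint
  }
  assert(p.payload == 42);   // safe to look at the contents now
  return true;
}

int main() {
  PoolObject p;
  assert(!try_scan(p));   // under construction: defer
  build(p);
  assert(try_scan(p));    // published: safe to scan
  return 0;
}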