diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp
index ead077be1e333066b6629e79d23e7d1867f9eef0..9fd85d6b4f142e7b9265eb0a3771616d602439fc 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp
@@ -44,52 +44,12 @@ HeapWord* CMSPermGen::mem_allocate(size_t size) {
   bool lock_owned = lock->owned_by_self();
   if (lock_owned) {
     MutexUnlocker mul(lock);
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   } else {
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   }
 }
 
-HeapWord* CMSPermGen::mem_allocate_work(size_t size) {
-  assert(!_gen->freelistLock()->owned_by_self(), "Potetntial deadlock");
-
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = NULL;
-
-  obj = _gen->allocate(size, false);
-  // Since we want to minimize pause times, we will prefer
-  // expanding the perm gen rather than doing a stop-world
-  // collection to satisfy the allocation request.
-  if (obj == NULL) {
-    // Try to expand the perm gen and allocate space.
-    obj = _gen->expand_and_allocate(size, false, false);
-    if (obj == NULL) {
-      // Let's see if a normal stop-world full collection will
-      // free up enough space.
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // The collection above may have shrunk the space, so try
-        // to expand again and allocate space.
-        obj = _gen->expand_and_allocate(size, false, false);
-      }
-      if (obj == NULL) {
-        // We have not been able to allocate space despite a
-        // full stop-world collection. We now make a last-ditch collection
-        // attempt (in which soft refs are all aggressively freed)
-        // that will try to reclaim as much space as possible.
-        SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-        obj = _gen->allocate(size, false);
-        if (obj == NULL) {
-          // Expand generation in case it was shrunk following the collection.
-          obj = _gen->expand_and_allocate(size, false, false);
-        }
-      }
-    }
-  }
-  return obj;
-}
-
 void CMSPermGen::compute_new_size() {
   _gen->compute_new_size();
 }
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp
index e7b7096f89c23dbab024b8da28c3586ff225890e..8e1d07760ab1a5205e58edc0b3f881e6a7aa539b 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp
@@ -29,7 +29,6 @@ class ConcurrentMarkSweepGeneration;
 
 class CMSPermGen: public PermGen {
   friend class VMStructs;
-  HeapWord* mem_allocate_work(size_t size);
  protected:
   // The "generation" view.
   ConcurrentMarkSweepGeneration* _gen;
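The deleted CMSPermGen::mem_allocate_work above duplicated an allocate / expand / collect / last-ditch-collect ladder that now lives in the shared PermGen::mem_allocate_in_gen (see the permGen.cpp hunk below, which also integrates the GC_locker handshake and a VM operation). A minimal sketch of that fallback ladder, using illustrative stand-in types rather than the HotSpot ones (Gen, full_collect and the cause constants here are assumptions for the sketch, not the committed code):

  #include <cstddef>
  #include <cstdlib>

  // Stand-ins for the HotSpot types; the fast path pretends the gen is
  // full so the fallback steps are exercised.
  struct Gen {
    void* allocate(std::size_t)              { return nullptr; }
    void* expand_and_allocate(std::size_t n) { return std::malloc(n); }
  };
  enum Cause { kPermGenFull, kLastDitch };
  static void full_collect(Cause) { /* stop-world full collection */ }

  void* allocate_with_fallback(Gen& gen, std::size_t size) {
    if (void* p = gen.allocate(size)) return p;              // 1. plain allocation
    if (void* p = gen.expand_and_allocate(size)) return p;   // 2. prefer expansion over a pause
    full_collect(kPermGenFull);                              // 3. normal full collection
    if (void* p = gen.allocate(size)) return p;
    if (void* p = gen.expand_and_allocate(size)) return p;   //    GC may have shrunk the gen
    full_collect(kLastDitch);                                // 4. also clears soft refs
    if (void* p = gen.allocate(size)) return p;
    return gen.expand_and_allocate(size);                    // 5. final expansion attempt
  }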
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index 8088f0c5d0d1cc0cf4b49559cc5861cd684fc440..6fd50b39fc597022dfdc70da4cef7bf596b9b4c5 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -590,6 +590,31 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
     full_gc_count = Universe::heap()->total_full_collections();
 
     result = perm_gen()->allocate_permanent(size);
+
+    if (result != NULL) {
+      return result;
+    }
+
+    if (GC_locker::is_active_and_needs_gc()) {
+      // If this thread is not in a jni critical section, we stall
+      // the requestor until the critical section has cleared and
+      // GC allowed. When the critical section clears, a GC is
+      // initiated by the last thread exiting the critical section; so
+      // we retry the allocation sequence from the beginning of the loop,
+      // rather than causing more, now probably unnecessary, GC attempts.
+      JavaThread* jthr = JavaThread::current();
+      if (!jthr->in_critical()) {
+        MutexUnlocker mul(Heap_lock);
+        GC_locker::stall_until_clear();
+        continue;
+      } else {
+        if (CheckJNICalls) {
+          fatal("Possible deadlock due to allocating while"
+                " in jni critical section");
+        }
+        return NULL;
+      }
+    }
   }
 
   if (result == NULL) {
@@ -622,6 +647,12 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
     if (op.prologue_succeeded()) {
       assert(Universe::heap()->is_in_permanent_or_null(op.result()),
         "result not in heap");
+      // If GC was locked out during VM operation then retry allocation
+      // and/or stall as necessary.
+      if (op.gc_locked()) {
+        assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+        continue;  // retry and/or stall as necessary
+      }
       // If a NULL results is being returned, an out-of-memory
       // will be thrown now.  Clear the gc_time_limit_exceeded
       // flag to avoid the following situation.
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 81c2d47a647763c5b38efc5cc2a8344e06430611..b42b130894d1beb968ab8bcad98742751ce7fc37 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -999,7 +999,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 
   // Increment the invocation count
-  heap->increment_total_collections();
+  heap->increment_total_collections(true);
 
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;
@@ -1964,7 +1964,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
-  if (GC_locker::is_active()) {
+  if (GC_locker::check_active_before_gc()) {
     return;
   }
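Both changes above lean on the same GC_locker handshake: an allocating thread that needs a GC while JNI critical sections are active either stalls until the last such thread exits (and that thread initiates the deferred GC), or returns NULL if it is itself inside a critical section, since stalling there would deadlock; on the collector side, check_active_before_gc() both tests for active critical sections and records that a GC was requested. The sketch below models that handshake with standard C++ primitives; the method names mirror the HotSpot API, but the bodies are illustrative assumptions, not the real implementation:

  #include <condition_variable>
  #include <mutex>

  class GCLockerSketch {
    std::mutex              _lock;
    std::condition_variable _cleared;
    int                     _critical_count = 0;  // threads inside JNI critical sections
    bool                    _needs_gc       = false;
   public:
    bool check_active_before_gc() {               // called by the collector
      std::lock_guard<std::mutex> g(_lock);
      if (_critical_count > 0) _needs_gc = true;  // remember the skipped GC
      return _critical_count > 0;
    }
    void stall_until_clear() {                    // called by allocating threads
      std::unique_lock<std::mutex> g(_lock);
      _cleared.wait(g, [this] { return !_needs_gc; });
    }
    void enter_critical() {
      std::lock_guard<std::mutex> g(_lock);
      ++_critical_count;
    }
    void exit_critical() {                        // last thread out triggers the GC
      std::lock_guard<std::mutex> g(_lock);
      if (--_critical_count == 0 && _needs_gc) {
        _needs_gc = false;                        // (real code initiates the GC here)
        _cleared.notify_all();
      }
    }
  };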
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 7f12cf55c55e9efa7e9dfab88b5789b2840a8722..0b21861397b3c428d3923b1cc9868aa62bf46725 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -69,6 +69,9 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_permanent_mem_allocate(_size);
+  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
   notify_gc_end();
 }
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 7108f318ec03e6191e2efdbf9a9a93e7ed64a7bd..7cab57dd727610876e18ba3f70f1d1455c07ddb8 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -144,3 +144,18 @@ void VM_GenCollectFull::doit() {
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
   notify_gc_end();
 }
+
+void VM_GenCollectForPermanentAllocation::doit() {
+  JvmtiGCForAllocationMarker jgcm;
+  notify_gc_begin(true);
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  GCCauseSetter gccs(gch, _gc_cause);
+  gch->do_full_collection(gch->must_clear_all_soft_refs(),
+                          gch->n_gens() - 1);
+  _res = gch->perm_gen()->allocate(_size, false);
+  assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
+  notify_gc_end();
+}
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index ee850f70e8f40da825ae7788208956b0c47c2a32..7777dc71c843685083d3427f36b809f8bd28da66 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -43,6 +43,7 @@
 //  is specified; and also the attach "inspectheap" operation
 //
 //  VM_GenCollectForAllocation
+//  VM_GenCollectForPermanentAllocation
 //  VM_ParallelGCFailedAllocation
 //  VM_ParallelGCFailedPermanentAllocation
 //   - this operation is invoked when allocation is failed;
@@ -166,3 +167,23 @@ class VM_GenCollectFull: public VM_GC_Operation {
   virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
   virtual void doit();
 };
+
+class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
+ private:
+  HeapWord* _res;
+  size_t    _size;     // size of object to be allocated
+ public:
+  VM_GenCollectForPermanentAllocation(size_t size,
+                                      unsigned int gc_count_before,
+                                      unsigned int full_gc_count_before,
+                                      GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before, true),
+      _size(size) {
+    _res = NULL;
+    _gc_cause = gc_cause;
+  }
+  ~VM_GenCollectForPermanentAllocation() {}
+  virtual VMOp_Type type() const { return VMOp_GenCollectForPermanentAllocation; }
+  virtual void doit();
+  HeapWord* result() const { return _res; }
+};
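The new operation follows the usual VM_GC_Operation protocol: the requesting thread snapshots the collection counters while still holding Heap_lock, releases the lock, and hands the operation to the VM thread; the operation's prologue compares the counters so that a GC which already ran in the meantime makes prologue_succeeded() return false and the caller simply retries allocation instead of forcing a second collection. A toy model of that round trip (all names here are stand-ins, not the HotSpot classes):

  #include <cstddef>

  struct HeapModel {
    unsigned total_collections = 0;
    void* perm_allocate(std::size_t) { return nullptr; }  // pretend perm gen stays full
    void  full_collect()             { ++total_collections; }
  };

  struct CollectForPermAllocOp {
    HeapModel&  heap;
    std::size_t size;
    unsigned    gc_count_before;      // snapshot taken under the heap lock
    void*       res = nullptr;
    bool        prologue_succeeded = false;

    void execute_at_safepoint() {
      // Prologue: if another GC ran since the caller looked, skip this one;
      // the caller will retry its allocation first.
      if (heap.total_collections != gc_count_before) return;
      prologue_succeeded = true;
      heap.full_collect();
      res = heap.perm_allocate(size);
    }
  };

  // usage: CollectForPermAllocOp op{heap, size, heap.total_collections};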
diff --git a/src/share/vm/includeDB_core b/src/share/vm/includeDB_core
index 17404cec6583a82c69bbf0d785732581761e27e6..4893e97b2b71780876afd92ed84389706261cd39 100644
--- a/src/share/vm/includeDB_core
+++ b/src/share/vm/includeDB_core
@@ -1636,6 +1636,7 @@ frame_<arch>.inline.hpp                 generate_platform_dependent_include
 
 gcLocker.cpp                            gcLocker.inline.hpp
 gcLocker.cpp                            sharedHeap.hpp
+gcLocker.cpp                            resourceArea.hpp
 
 gcLocker.hpp                            collectedHeap.hpp
 gcLocker.hpp                            genCollectedHeap.hpp
@@ -3315,6 +3316,10 @@ permGen.cpp                             java.hpp
 permGen.cpp                             oop.inline.hpp
 permGen.cpp                             permGen.hpp
 permGen.cpp                             universe.hpp
+permGen.cpp                             gcLocker.hpp
+permGen.cpp                             gcLocker.inline.hpp
+permGen.cpp                             vmGCOperations.hpp
+permGen.cpp                             vmThread.hpp
 
 permGen.hpp                             gcCause.hpp
 permGen.hpp                             generation.hpp
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index 428a5d27553150a1095cae7130b439d0b5bc330a..4e770e01b36942de34bf123f06e27ceb593284af 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -32,6 +32,12 @@ volatile bool GC_locker::_doing_gc = false;
 
 void GC_locker::stall_until_clear() {
   assert(!JavaThread::current()->in_critical(), "Would deadlock");
+  if (PrintJNIGCStalls && PrintGCDetails) {
+    ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+    gclog_or_tty->print_cr(
+      "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
+      JavaThread::current()->name());
+  }
   MutexLocker ml(JNICritical_lock);
   // Wait for _needs_gc to be cleared
   while (GC_locker::needs_gc()) {
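The resourceArea.hpp dependency added for gcLocker.cpp exists because JavaThread::name() returns a string allocated in the calling thread's resource area, and the ResourceMark in stall_until_clear() releases that storage when the scope ends. A toy arena/mark pair showing the pattern (illustrative only, not the HotSpot resource area):

  #include <cstddef>
  #include <vector>

  struct ResourceArena {
    std::vector<char*> chunks;
    char* alloc(std::size_t n)      { chunks.push_back(new char[n]); return chunks.back(); }
    std::size_t watermark() const   { return chunks.size(); }
    void release_to(std::size_t m) {
      while (chunks.size() > m) { delete[] chunks.back(); chunks.pop_back(); }
    }
  };

  struct ResourceMarkSketch {   // RAII: frees everything allocated after the mark
    ResourceArena& arena;
    std::size_t    mark;
    explicit ResourceMarkSketch(ResourceArena& a) : arena(a), mark(a.watermark()) {}
    ~ResourceMarkSketch() { arena.release_to(mark); }
  };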
Thread \"%s\" is stalled by JNI critical section.", + JavaThread::current()->name()); + } MutexLocker ml(JNICritical_lock); // Wait for _needs_gc to be cleared while (GC_locker::needs_gc()) { diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp index b3cf2de0f4b65d5a755563b9d5d230b59312c6ce..36cf0a6959d659e0dcfd5e944d05df9f2a8559eb 100644 --- a/src/share/vm/memory/genCollectedHeap.hpp +++ b/src/share/vm/memory/genCollectedHeap.hpp @@ -35,6 +35,7 @@ class GenCollectedHeap : public SharedHeap { friend class CMSCollector; friend class GenMarkSweep; friend class VM_GenCollectForAllocation; + friend class VM_GenCollectForPermanentAllocation; friend class VM_GenCollectFull; friend class VM_GenCollectFullConcurrent; friend class VM_GC_HeapInspection; diff --git a/src/share/vm/memory/permGen.cpp b/src/share/vm/memory/permGen.cpp index f611cc36e1a3caf14d5b7f7ea18912940dced343..1c8fa9681ecffb2b506b92848a9d7465be454213 100644 --- a/src/share/vm/memory/permGen.cpp +++ b/src/share/vm/memory/permGen.cpp @@ -25,6 +25,70 @@ #include "incls/_precompiled.incl" #include "incls/_permGen.cpp.incl" +HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { + MutexLocker ml(Heap_lock); + GCCause::Cause next_cause = GCCause::_permanent_generation_full; + GCCause::Cause prev_cause = GCCause::_no_gc; + + for (;;) { + HeapWord* obj = gen->allocate(size, false); + if (obj != NULL) { + return obj; + } + if (gen->capacity() < _capacity_expansion_limit || + prev_cause != GCCause::_no_gc) { + obj = gen->expand_and_allocate(size, false); + } + if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) { + if (GC_locker::is_active_and_needs_gc()) { + // If this thread is not in a jni critical section, we stall + // the requestor until the critical section has cleared and + // GC allowed. When the critical section clears, a GC is + // initiated by the last thread exiting the critical section; so + // we retry the allocation sequence from the beginning of the loop, + // rather than causing more, now probably unnecessary, GC attempts. 
+ JavaThread* jthr = JavaThread::current(); + if (!jthr->in_critical()) { + MutexUnlocker mul(Heap_lock); + // Wait for JNI critical section to be exited + GC_locker::stall_until_clear(); + continue; + } else { + if (CheckJNICalls) { + fatal("Possible deadlock due to allocating while" + " in jni critical section"); + } + return NULL; + } + } + + // Read the GC count while holding the Heap_lock + unsigned int gc_count_before = SharedHeap::heap()->total_collections(); + unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections(); + { + MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back + VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before, + next_cause); + VMThread::execute(&op); + if (!op.prologue_succeeded() || op.gc_locked()) { + assert(op.result() == NULL, "must be NULL if gc_locked() is true"); + continue; // retry and/or stall as necessary + } + obj = op.result(); + assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj), + "result not in heap"); + if (obj != NULL) { + return obj; + } + } + prev_cause = next_cause; + next_cause = GCCause::_last_ditch_collection; + } else { + return obj; + } + } +} + CompactingPermGen::CompactingPermGen(ReservedSpace rs, ReservedSpace shared_rs, size_t initial_byte_size, @@ -44,40 +108,7 @@ CompactingPermGen::CompactingPermGen(ReservedSpace rs, } HeapWord* CompactingPermGen::mem_allocate(size_t size) { - MutexLocker ml(Heap_lock); - HeapWord* obj = _gen->allocate(size, false); - bool tried_collection = false; - bool tried_expansion = false; - while (obj == NULL) { - if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) { - // Expansion limit reached, try collection before expanding further - // For now we force a full collection, this could be changed - SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full); - obj = _gen->allocate(size, false); - tried_collection = true; - tried_expansion = false; // ... following the collection: - // the collection may have shrunk the space. - } - if (obj == NULL && !tried_expansion) { - obj = _gen->expand_and_allocate(size, false); - tried_expansion = true; - } - if (obj == NULL && tried_collection && tried_expansion) { - // We have not been able to allocate despite a collection and - // an attempted space expansion. We now make a last-ditch collection - // attempt that will try to reclaim as much space as possible (for - // example by aggressively clearing all soft refs). - SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection); - obj = _gen->allocate(size, false); - if (obj == NULL) { - // An expansion attempt is necessary since the previous - // collection may have shrunk the space. 
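PermGen::mem_allocate_in_gen above escalates through GC causes on successive retries: expansion without a GC is allowed only while the generation is below _capacity_expansion_limit (or after a GC has already run, since the collection may have shrunk the space), the first collection uses _permanent_generation_full, the second uses _last_ditch_collection (which also clears soft references aggressively), and after that the loop gives up and returns NULL. The escalation, reduced to a stand-alone function with shortened, illustrative names:

  enum class Cause { NoGC, PermGenFull, LastDitch };

  // Which cause the next collection attempt should use, given the one
  // already tried; the real loop exits instead of repeating LastDitch.
  Cause next_attempt(Cause prev) {
    switch (prev) {
      case Cause::NoGC:        return Cause::PermGenFull;  // normal full GC first
      case Cause::PermGenFull: return Cause::LastDitch;    // then clear soft refs too
      case Cause::LastDitch:   return Cause::LastDitch;    // caller stops retrying here
    }
    return Cause::LastDitch;  // unreachable; keeps compilers quiet
  }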
diff --git a/src/share/vm/memory/permGen.hpp b/src/share/vm/memory/permGen.hpp
index 47f16b83b0fed7182efd721090a31935cdefff0f..263a589b272143a500e3fd8dee063d035b2b6ac0 100644
--- a/src/share/vm/memory/permGen.hpp
+++ b/src/share/vm/memory/permGen.hpp
@@ -38,6 +38,8 @@ class PermGen : public CHeapObj {
   size_t _capacity_expansion_limit;  // maximum expansion allowed without a
                                      // full gc occuring
 
+  HeapWord* mem_allocate_in_gen(size_t size, Generation* gen);
+
  public:
   enum Name {
     MarkSweepCompact, MarkSweep, ConcurrentMarkSweep
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 19f005aa887c8def9ad4d0ce286a1bd812d4a5d0..66ff00b31d786c092128293e0c1b046993c9df43 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1919,6 +1919,10 @@
   develop(bool, IgnoreLibthreadGPFault, false,                              \
           "Suppress workaround for libthread GP fault")                     \
                                                                             \
+  product(bool, PrintJNIGCStalls, false,                                    \
+          "Print diagnostic message when GC is stalled "                    \
+          "by JNI critical section")                                        \
+                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index 778a46dd7181e8a25868faf9461d0b9c3b9b4b3e..54264ad0dde362391901f046df2c5bcd89a01678 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -49,6 +49,7 @@
   template(GenCollectFull)                        \
   template(GenCollectFullConcurrent)              \
   template(GenCollectForAllocation)               \
+  template(GenCollectForPermanentAllocation)      \
   template(ParallelGCFailedAllocation)            \
   template(ParallelGCFailedPermanentAllocation)   \
   template(ParallelGCSystemGC)                    \
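For context, the situation the new PrintJNIGCStalls flag diagnoses arises from JNI code like the following: between GetPrimitiveArrayCritical and ReleasePrimitiveArrayCritical the GC_locker is held, so an allocation in another thread that needs a GC will stall, and when run with -XX:+PrintJNIGCStalls -XX:+PrintGCDetails the stalled thread's name is logged. This is a hedged example; the Java_Example_sum name and the surrounding class are hypothetical:

  #include <jni.h>

  extern "C" JNIEXPORT jint JNICALL
  Java_Example_sum(JNIEnv* env, jclass, jintArray arr) {
    // Do all other JNI work before entering the critical region.
    jint n = env->GetArrayLength(arr);
    jint* data = static_cast<jint*>(env->GetPrimitiveArrayCritical(arr, nullptr));
    if (data == nullptr) return 0;   // allocation failure already pending
    jint sum = 0;
    for (jint i = 0; i < n; ++i) sum += data[i];   // GC is locked out in here
    // The last thread to leave a critical region initiates any deferred GC.
    env->ReleasePrimitiveArrayCritical(arr, data, JNI_ABORT);
    return sum;
  }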