Commit d5a88d45 authored by: ysr

6782457: CMS: Livelock in CompactibleFreeListSpace::block_size()

6736295: SIGSEGV in product jvm, assertion "these are the only valid states during a mark sweep" in fastdebug
Summary: Restructured the code in the perm gen allocation retry loop so as to avoid "safepoint-blocking" on locks, in this case the Heap_lock, while holding uninitialized allocated heap storage.
Reviewed-by: apetrusenko, iveresov, jcoomes, jmasa, poonam
Parent commit: 80629b86
...@@ -26,20 +26,24 @@ ...@@ -26,20 +26,24 @@
#include "incls/_permGen.cpp.incl" #include "incls/_permGen.cpp.incl"
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
MutexLocker ml(Heap_lock);
GCCause::Cause next_cause = GCCause::_permanent_generation_full; GCCause::Cause next_cause = GCCause::_permanent_generation_full;
GCCause::Cause prev_cause = GCCause::_no_gc; GCCause::Cause prev_cause = GCCause::_no_gc;
unsigned int gc_count_before, full_gc_count_before;
HeapWord* obj;
for (;;) { for (;;) {
HeapWord* obj = gen->allocate(size, false); {
if (obj != NULL) { MutexLocker ml(Heap_lock);
return obj; if ((obj = gen->allocate(size, false)) != NULL) {
} return obj;
if (gen->capacity() < _capacity_expansion_limit || }
prev_cause != GCCause::_no_gc) { if (gen->capacity() < _capacity_expansion_limit ||
obj = gen->expand_and_allocate(size, false); prev_cause != GCCause::_no_gc) {
} obj = gen->expand_and_allocate(size, false);
if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) { }
if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
return obj;
}
if (GC_locker::is_active_and_needs_gc()) { if (GC_locker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall // If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and // the requestor until the critical section has cleared and
...@@ -61,31 +65,27 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { ...@@ -61,31 +65,27 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
return NULL; return NULL;
} }
} }
// Read the GC count while holding the Heap_lock // Read the GC count while holding the Heap_lock
unsigned int gc_count_before = SharedHeap::heap()->total_collections(); gc_count_before = SharedHeap::heap()->total_collections();
unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections(); full_gc_count_before = SharedHeap::heap()->total_full_collections();
{ }
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before, // Give up heap lock above, VMThread::execute below gets it back
next_cause); VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
VMThread::execute(&op); next_cause);
if (!op.prologue_succeeded() || op.gc_locked()) { VMThread::execute(&op);
assert(op.result() == NULL, "must be NULL if gc_locked() is true"); if (!op.prologue_succeeded() || op.gc_locked()) {
continue; // retry and/or stall as necessary assert(op.result() == NULL, "must be NULL if gc_locked() is true");
} continue; // retry and/or stall as necessary
obj = op.result(); }
assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj), obj = op.result();
"result not in heap"); assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
if (obj != NULL) { "result not in heap");
return obj; if (obj != NULL) {
}
}
prev_cause = next_cause;
next_cause = GCCause::_last_ditch_collection;
} else {
return obj; return obj;
} }
prev_cause = next_cause;
next_cause = GCCause::_last_ditch_collection;
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register