提交 0cc6b7ee 编写于 作者: S sjohanss

8028498: runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java asserts in RT_Baseline

Summary: Prevent GCs from occurring before the VM is completely initialized. This was previously partly ensured by one part of the GC locker, which has now been removed.
Reviewed-by: coleenp, pliden
上级 bfe262f7
...@@ -103,6 +103,15 @@ bool VM_GC_Operation::doit_prologue() { ...@@ -103,6 +103,15 @@ bool VM_GC_Operation::doit_prologue() {
assert(((_gc_cause != GCCause::_no_gc) && assert(((_gc_cause != GCCause::_no_gc) &&
(_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause"); (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
// To be able to handle a GC the VM initialization needs to be completed.
if (!is_init_completed()) {
vm_exit_during_initialization(
err_msg("GC triggered before VM initialization completed. Try increasing "
"NewSize, current value " UINTX_FORMAT "%s.",
byte_size_in_proper_unit(NewSize),
proper_unit_for_byte_size(NewSize)));
}
acquire_pending_list_lock(); acquire_pending_list_lock();
// If the GC count has changed someone beat us to the collection // If the GC count has changed someone beat us to the collection
// Get the Heap_lock after the pending_list_lock. // Get the Heap_lock after the pending_list_lock.
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include "memory/sharedHeap.hpp" #include "memory/sharedHeap.hpp"
volatile jint GC_locker::_jni_lock_count = 0; volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false; volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false; volatile bool GC_locker::_doing_gc = false;
...@@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) { ...@@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
// We check that at least one thread is in a critical region before // We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting // blocking because blocked threads are woken up by a thread exiting
// a JNI critical region. // a JNI critical region.
while ((needs_gc() && is_jni_active()) || _doing_gc) { while (is_active_and_needs_gc() || _doing_gc) {
JNICritical_lock->wait(); JNICritical_lock->wait();
} }
thread->enter_critical(); thread->enter_critical();
...@@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) { ...@@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) {
_jni_lock_count--; _jni_lock_count--;
decrement_debug_jni_lock_count(); decrement_debug_jni_lock_count();
thread->exit_critical(); thread->exit_critical();
if (needs_gc() && !is_jni_active()) { if (needs_gc() && !is_active_internal()) {
// We're the last thread out. Cause a GC to occur. // We're the last thread out. Cause a GC to occur.
// GC will also check is_active, so this check is not _doing_gc = true;
// strictly needed. It's added here to make it clear that {
// the GC will NOT be performed if any other caller // Must give up the lock while at a safepoint
// of GC_locker::lock() still needs GC locked. MutexUnlocker munlock(JNICritical_lock);
if (!is_active_internal()) { if (PrintJNIGCStalls && PrintGCDetails) {
_doing_gc = true; ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
{ gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
// Must give up the lock while at a safepoint gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
Universe::heap()->collect(GCCause::_gc_locker);
} }
_doing_gc = false; Universe::heap()->collect(GCCause::_gc_locker);
} }
_doing_gc = false;
_needs_gc = false; _needs_gc = false;
JNICritical_lock->notify_all(); JNICritical_lock->notify_all();
} }
......
...@@ -54,8 +54,6 @@ class GC_locker: public AllStatic { ...@@ -54,8 +54,6 @@ class GC_locker: public AllStatic {
// safepointing and decremented during the slow path of GC_locker // safepointing and decremented during the slow path of GC_locker
// unlocking. // unlocking.
static volatile jint _jni_lock_count; // number of jni active instances. static volatile jint _jni_lock_count; // number of jni active instances.
static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint // note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC static volatile bool _doing_gc; // unlock_critical() is doing a GC
...@@ -66,12 +64,6 @@ class GC_locker: public AllStatic { ...@@ -66,12 +64,6 @@ class GC_locker: public AllStatic {
static volatile jint _debug_jni_lock_count; static volatile jint _debug_jni_lock_count;
#endif #endif
// Accessors
static bool is_jni_active() {
assert(_needs_gc, "only valid when _needs_gc is set");
return _jni_lock_count > 0;
}
// At a safepoint, visit all threads and count the number of active // At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active // critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started. // critical sections are exited before a new one is started.
...@@ -82,7 +74,7 @@ class GC_locker: public AllStatic { ...@@ -82,7 +74,7 @@ class GC_locker: public AllStatic {
static bool is_active_internal() { static bool is_active_internal() {
verify_critical_count(); verify_critical_count();
return _lock_count > 0 || _jni_lock_count > 0; return _jni_lock_count > 0;
} }
public: public:
...@@ -132,10 +124,6 @@ class GC_locker: public AllStatic { ...@@ -132,10 +124,6 @@ class GC_locker: public AllStatic {
// not a stable predicate. // not a stable predicate.
static void stall_until_clear(); static void stall_until_clear();
// Non-structured GC locking: currently needed for JNI. Use with care!
static void lock();
static void unlock();
// The following two methods are used for JNI critical regions. // The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GC_locker // If we find that we failed to perform a GC because the GC_locker
// was active, arrange for one as soon as possible by allowing // was active, arrange for one as soon as possible by allowing
......
...@@ -27,22 +27,6 @@ ...@@ -27,22 +27,6 @@
#include "memory/gcLocker.hpp" #include "memory/gcLocker.hpp"
// Non-structured (non-JNI) GC lock: atomically bumps the global _lock_count,
// which is consulted by GC_locker::is_active_internal(), so a GC will not be
// considered inactive-lockable while this count is nonzero.
inline void GC_locker::lock() {
// cast away volatile
Atomic::inc(&_lock_count);
// Debug-only bookkeeping: when CheckUnhandledOops is enabled, track the
// per-thread GC lock-out depth (see Thread::_gc_locked_out_count).
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
// Sanity check: the lock must not be taken while a GC is already running.
assert(Universe::heap() == NULL ||
!Universe::heap()->is_gc_active(), "locking failed");
}
// Releases one non-structured GC lock by atomically decrementing _lock_count;
// mirrors GC_locker::lock().
inline void GC_locker::unlock() {
// cast away volatile
Atomic::dec(&_lock_count);
// Debug-only bookkeeping mirroring the increment done in GC_locker::lock().
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
}
inline void GC_locker::lock_critical(JavaThread* thread) { inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) { if (!thread->in_critical()) {
if (needs_gc()) { if (needs_gc()) {
......
...@@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) { ...@@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
TraceTime timer("Dump Shared Spaces", TraceStartupTime); TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm; ResourceMark rm;
// Lock out GC - is it necessary? I don't think we care.
No_GC_Verifier no_gc;
// Preload classes to be shared. // Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB. // Should use some os:: method rather than fopen() here. aB.
// Construct the path to the class list (in jre/lib) // Construct the path to the class list (in jre/lib)
......
...@@ -632,7 +632,6 @@ jint universe_init() { ...@@ -632,7 +632,6 @@ jint universe_init() {
guarantee(sizeof(oop) % sizeof(HeapWord) == 0, guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
"oop size is not not a multiple of HeapWord size"); "oop size is not not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime); TraceTime timer("Genesis", TraceStartupTime);
GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets(); JavaClasses::compute_hard_coded_offsets();
jint status = Universe::initialize_heap(); jint status = Universe::initialize_heap();
...@@ -1164,8 +1163,6 @@ bool universe_post_init() { ...@@ -1164,8 +1163,6 @@ bool universe_post_init() {
MemoryService::add_metaspace_memory_pools(); MemoryService::add_metaspace_memory_pools();
GC_locker::unlock(); // allow gc after bootstrapping
MemoryService::set_universe_heap(Universe::_collectedHeap); MemoryService::set_universe_heap(Universe::_collectedHeap);
return true; return true;
} }
......
...@@ -239,7 +239,6 @@ Thread::Thread() { ...@@ -239,7 +239,6 @@ Thread::Thread() {
debug_only(_allow_allocation_count = 0;) debug_only(_allow_allocation_count = 0;)
NOT_PRODUCT(_allow_safepoint_count = 0;) NOT_PRODUCT(_allow_safepoint_count = 0;)
NOT_PRODUCT(_skip_gcalot = false;) NOT_PRODUCT(_skip_gcalot = false;)
CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
_jvmti_env_iteration_count = 0; _jvmti_env_iteration_count = 0;
set_allocated_bytes(0); set_allocated_bytes(0);
_vm_operation_started_count = 0; _vm_operation_started_count = 0;
......
...@@ -249,9 +249,6 @@ class Thread: public ThreadShadow { ...@@ -249,9 +249,6 @@ class Thread: public ThreadShadow {
// Used by SkipGCALot class. // Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot? NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
// Record when GC is locked out via the GC_locker mechanism
CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
friend class No_Alloc_Verifier; friend class No_Alloc_Verifier;
friend class No_Safepoint_Verifier; friend class No_Safepoint_Verifier;
friend class Pause_No_Safepoint_Verifier; friend class Pause_No_Safepoint_Verifier;
...@@ -397,7 +394,6 @@ class Thread: public ThreadShadow { ...@@ -397,7 +394,6 @@ class Thread: public ThreadShadow {
void clear_unhandled_oops() { void clear_unhandled_oops() {
if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops(); if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
} }
bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
#endif // CHECK_UNHANDLED_OOPS #endif // CHECK_UNHANDLED_OOPS
#ifndef PRODUCT #ifndef PRODUCT
......
...@@ -113,9 +113,7 @@ void UnhandledOops::unregister_unhandled_oop(oop* op) { ...@@ -113,9 +113,7 @@ void UnhandledOops::unregister_unhandled_oop(oop* op) {
void UnhandledOops::clear_unhandled_oops() { void UnhandledOops::clear_unhandled_oops() {
assert (CheckUnhandledOops, "should only be called with checking option"); assert (CheckUnhandledOops, "should only be called with checking option");
if (_thread->is_gc_locked_out()) {
return;
}
for (int k = 0; k < _oop_list->length(); k++) { for (int k = 0; k < _oop_list->length(); k++) {
UnhandledOopEntry entry = _oop_list->at(k); UnhandledOopEntry entry = _oop_list->at(k);
// If an entry is on the unhandled oop list but isn't on the stack // If an entry is on the unhandled oop list but isn't on the stack
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册