Commit 12e01550, authored by dholmes

8035663: Suspicious failure of test java/util/concurrent/Phaser/FickleRegister.java

Reviewed-by: shade, coleenp
Parent 023f1d63
@@ -322,10 +322,33 @@ UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject
 UNSAFE_END
 #ifndef SUPPORTS_NATIVE_CX8
-// Keep old code for platforms which may not have atomic jlong (8 bytes) instructions
-// Volatile long versions must use locks if !VM_Version::supports_cx8().
-// support_cx8 is a surrogate for 'supports atomic long memory ops'.
+// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
+//
+// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
+// values we have to use a lock-based scheme to enforce atomicity. This has to be
+// applied to all Unsafe operations that set the value of a jlong field. Even so
+// the compareAndSwapLong operation will not be atomic with respect to direct stores
+// to the field from Java code. It is important therefore that any Java code that
+// utilizes these Unsafe jlong operations does not perform direct stores. To permit
+// direct loads of the field from Java code we must also use Atomic::store within the
+// locked regions. And for good measure, in case there are direct stores, we also
+// employ Atomic::load within those regions. Note that the field in question must be
+// volatile and so must have atomic load/store accesses applied at the Java level.
+//
+// The locking scheme could utilize a range of strategies for controlling the locking
+// granularity: from a lock per-field through to a single global lock. The latter is
+// the simplest and is used for the current implementation. Note that the Java object
+// that contains the field can not, in general, be used for locking. To do so can lead
+// to deadlocks as we may introduce locking into what appears to the Java code to be a
+// lock-free path.
+//
+// As all the locked regions are very short and themselves non-blocking we can treat
+// them as leaf routines and elide safepoint checks (ie we don't perform any thread
+// state transitions even when blocking for the lock). Note that if we do choose to
+// add safepoint checks and thread state transitions, we must ensure that we calculate
+// the address of the field _after_ we have acquired the lock, else the object may have
+// been moved by the GC.
 UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
   UnsafeWrapper("Unsafe_GetLongVolatile");
@@ -337,8 +360,8 @@ UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject
   else {
     Handle p (THREAD, JNIHandles::resolve(obj));
     jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
-    ObjectLocker ol(p, THREAD);
-    jlong value = *addr;
+    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+    jlong value = Atomic::load(addr);
     return value;
   }
 }
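The comment block in the first hunk describes the fallback in prose. As a rough standalone illustration of that pattern (not HotSpot code: the names g_jlong_lock, LongField, get_long_volatile and set_long_volatile are invented for the sketch, with std::mutex standing in for UnsafeJlong_lock and std::atomic loads/stores standing in for Atomic::load/Atomic::store):

#include <atomic>
#include <cstdint>
#include <iostream>
#include <mutex>

// Single global lock guarding every emulated 64-bit access, mirroring the
// "single global lock" granularity chosen by the patch.
static std::mutex g_jlong_lock;

struct LongField {
  // The field is still accessed with atomic (untorn) loads/stores so that a
  // reader that skips the lock -- the "direct loads from Java code" case --
  // never observes a half-written value.
  std::atomic<int64_t> value{0};
};

int64_t get_long_volatile(LongField& f) {
  std::lock_guard<std::mutex> guard(g_jlong_lock);  // ~ MutexLockerEx(UnsafeJlong_lock, ...)
  return f.value.load(std::memory_order_relaxed);   // ~ Atomic::load(addr)
}

void set_long_volatile(LongField& f, int64_t x) {
  std::lock_guard<std::mutex> guard(g_jlong_lock);
  f.value.store(x, std::memory_order_relaxed);      // ~ Atomic::store(x, addr)
}

int main() {
  LongField f;
  set_long_volatile(f, 0x1234567890abcdefLL);
  std::cout << std::hex << get_long_volatile(f) << std::endl;
  return 0;
}

The critical sections are tiny and never block on anything else, which is what allows the real code to acquire UnsafeJlong_lock with Mutex::_no_safepoint_check_flag and treat the regions as leaf routines; the memory-ordering details of a true volatile access are deliberately simplified in this sketch.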
@@ -353,8 +376,8 @@ UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject o
   else {
     Handle p (THREAD, JNIHandles::resolve(obj));
     jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
-    ObjectLocker ol(p, THREAD);
-    *addr = x;
+    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+    Atomic::store(x, addr);
   }
 }
 UNSAFE_END
@@ -463,8 +486,8 @@ UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject ob
   else {
     Handle p (THREAD, JNIHandles::resolve(obj));
     jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
-    ObjectLocker ol(p, THREAD);
-    *addr = x;
+    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+    Atomic::store(x, addr);
   }
 }
 #endif
@@ -1213,14 +1236,19 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jo
   UnsafeWrapper("Unsafe_CompareAndSwapLong");
   Handle p (THREAD, JNIHandles::resolve(obj));
   jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
+#ifdef SUPPORTS_NATIVE_CX8
+  return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
+#else
   if (VM_Version::supports_cx8())
     return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
   else {
     jboolean success = false;
-    ObjectLocker ol(p, THREAD);
-    if (*addr == e) { *addr = x; success = true; }
+    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+    jlong val = Atomic::load(addr);
+    if (val == e) { Atomic::store(x, addr); success = true; }
     return success;
   }
+#endif
 UNSAFE_END
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
...
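In the Unsafe_CompareAndSwapLong hunk above, the choice between the native 8-byte cmpxchg and the locked fallback is made twice: at compile time via SUPPORTS_NATIVE_CX8 and, when that macro is absent, at run time via VM_Version::supports_cx8(). Below is a hedged standalone sketch of the same selection structure, not HotSpot code: the macro name is reused only for illustration, and supports_cx8_at_runtime is a made-up stand-in for the VM_Version probe.

#include <atomic>
#include <cstdint>
#include <mutex>

static std::mutex g_jlong_lock;  // single global lock, as in the patch

// Invented stand-in for VM_Version::supports_cx8(); a real VM probes the CPU.
static bool supports_cx8_at_runtime() { return false; }

bool compare_and_swap_long(std::atomic<int64_t>& field, int64_t e, int64_t x) {
#ifdef SUPPORTS_NATIVE_CX8
  // Build-time guarantee of an 8-byte CAS: no lock and no runtime check needed.
  return field.compare_exchange_strong(e, x);
#else
  if (supports_cx8_at_runtime()) {
    return field.compare_exchange_strong(e, x);
  }
  // Locked fallback: atomic only with respect to other callers that take the
  // same lock, not with respect to stores that bypass it -- the "direct
  // stores from Java code" caveat spelled out in the comment block.
  std::lock_guard<std::mutex> guard(g_jlong_lock);
  if (field.load(std::memory_order_relaxed) != e) {
    return false;
  }
  field.store(x, std::memory_order_relaxed);
  return true;
#endif
}

Note that, as in the patch, the lock-based branch reads and writes the field with atomic operations so that lock-free readers still see untorn values; a plain 64-bit read or write inside the locked region would reintroduce the tearing the lock is meant to prevent.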
@@ -135,6 +135,10 @@ Mutex* JfrStream_lock = NULL;
 Mutex* JfrThreadGroups_lock = NULL;
 #endif
+#ifndef SUPPORTS_NATIVE_CX8
+Mutex* UnsafeJlong_lock = NULL;
+#endif
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
 static int _num_mutex;
@@ -286,6 +290,9 @@ void mutex_init() {
   def(JfrStacktrace_lock , Mutex, special, true );
 #endif
+#ifndef SUPPORTS_NATIVE_CX8
+  def(UnsafeJlong_lock , Mutex, special, false);
+#endif
 }
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
...
@@ -151,6 +151,10 @@ extern Mutex* JfrStream_lock; // protects JFR stream access
 extern Mutex* JfrThreadGroups_lock; // protects JFR access to Thread Groups
 #endif
+#ifndef SUPPORTS_NATIVE_CX8
+extern Mutex* UnsafeJlong_lock; // provides Unsafe atomic updates to jlongs on platforms that don't support cx8
+#endif
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker. The lock is an OS lock, not
 // an object lock, and the two do not interoperate. Do not use Mutex-based
...