diff --git a/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index e4a3806da8642cc47ddd0b951f431f98122d67c0..62c201605e529ed39a113aec0ccef33c7518655e 100644
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -817,21 +817,6 @@ class StubGenerator: public StubCodeGenerator {
 
   Label _atomic_add_stub;  // called from other stubs
 
-  // Support for void OrderAccess::fence().
-  //
-  address generate_fence() {
-    StubCodeMark mark(this, "StubRoutines", "fence");
-    address start = __ pc();
-
-    __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad  | Assembler::LoadStore |
-                                          Assembler::StoreLoad | Assembler::StoreStore));
-    __ retl(false);
-    __ delayed()->nop();
-
-    return start;
-  }
-
-
   //------------------------------------------------------------------------------------------------------------------------
   // The following routine generates a subroutine to throw an asynchronous
   // UnknownError when an unsafe access gets a fault that could not be
@@ -2861,7 +2846,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
-    StubRoutines::_fence_entry               = generate_fence();
 #endif  // COMPILER2 !=> _LP64
   }
 
diff --git a/src/cpu/x86/vm/assembler_x86.cpp b/src/cpu/x86/vm/assembler_x86.cpp
index 351ae0447288163de6138e5fccdff0c20ba383ed..dbf2f2b664af4506bffd87e1d6d4d33d4d239d51 100644
--- a/src/cpu/x86/vm/assembler_x86.cpp
+++ b/src/cpu/x86/vm/assembler_x86.cpp
@@ -1438,26 +1438,12 @@ void Assembler::lock() {
   }
 }
 
-// Serializes memory.
+// Emit mfence instruction
 void Assembler::mfence() {
-  // Memory barriers are only needed on multiprocessors
-  if (os::is_MP()) {
-    if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
-      emit_byte( 0x0F );       // MFENCE; faster blows no regs
-      emit_byte( 0xAE );
-      emit_byte( 0xF0 );
-    } else {
-      // All usable chips support "locked" instructions which suffice
-      // as barriers, and are much faster than the alternative of
-      // using cpuid instruction. We use here a locked add [esp],0.
-      // This is conveniently otherwise a no-op except for blowing
-      // flags (which we save and restore.)
-      pushf();                 // Save eflags register
-      lock();
-      addl(Address(rsp, 0), 0);// Assert the lock# signal here
-      popf();                  // Restore eflags register
-    }
-  }
+  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
+  emit_byte( 0x0F );
+  emit_byte( 0xAE );
+  emit_byte( 0xF0 );
 }
 
 void Assembler::mov(Register dst, Register src) {
diff --git a/src/cpu/x86/vm/assembler_x86.hpp b/src/cpu/x86/vm/assembler_x86.hpp
index 4dfe7fec22ef142051d82acac41c8f1f7e8df2da..a5efad8d22b20dcb6b65aa6e6bddfc27816e94a6 100644
--- a/src/cpu/x86/vm/assembler_x86.hpp
+++ b/src/cpu/x86/vm/assembler_x86.hpp
@@ -1068,15 +1068,23 @@ private:
     LoadLoad   = 1 << 0
   };
 
-  // Serializes memory.
+  // Serializes memory and blows flags
   void membar(Membar_mask_bits order_constraint) {
-    // We only have to handle StoreLoad and LoadLoad
-    if (order_constraint & StoreLoad) {
-      // MFENCE subsumes LFENCE
-      mfence();
-    } /* [jk] not needed currently: else if (order_constraint & LoadLoad) {
-      lfence();
-    } */
+    if (os::is_MP()) {
+      // We only have to handle StoreLoad
+      if (order_constraint & StoreLoad) {
+        // All usable chips support "locked" instructions which suffice
+        // as barriers, and are much faster than the alternative of
+        // using cpuid instruction. We use here a locked add [esp],0.
+        // This is conveniently otherwise a no-op except for blowing
+        // flags.
+        // Any change to this code may need to revisit other places in
+        // the code where this idiom is used, in particular the
+        // orderAccess code.
+        lock();
+        addl(Address(rsp, 0), 0);// Assert the lock# signal here
+      }
+    }
   }
 
   void mfence();
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 73d60542a4d15c23a59381297ff37429736a79bb..ec322b527d1f0e950e8e28a7222208e0e9b6b3f2 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -637,7 +637,7 @@ class StubGenerator: public StubCodeGenerator {
   address generate_orderaccess_fence() {
     StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
     address start = __ pc();
-    __ mfence();
+    __ membar(Assembler::StoreLoad);
     __ ret(0);
 
     return start;
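
For reference, the StoreLoad barrier that the rewritten membar() emits, and that generate_orderaccess_fence() now routes through, is the same locked read-modify-write of the top-of-stack word that the OrderAccess changes further down switch to. A minimal standalone sketch, assuming GCC-style inline assembly on x86; illustrative only, not HotSpot code:

    // The "lock addl" StoreLoad idiom from this patch, written as
    // compiler-level inline assembly rather than emitted machine code.
    static inline void storeload_barrier() {
    #ifdef __x86_64__
      __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    #else
      __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
    #endif
    }

The locked add leaves the stack word unchanged but drains the store buffer, which is all a StoreLoad barrier needs on x86. It also clobbers the flags, which is why the comment above says the barrier "blows flags" and why the membar_volatile instructs below gain an effect(KILL cr).
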
(empty encoding)" + } + %} + ins_encode %{ + __ membar(Assembler::StoreLoad); + %} ins_pipe(pipe_slow); %} diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad index 0705c2a009ec6f118844480638a9397abd33cc0f..ae23fef11146907f4518574ebb5b929f2d699b8c 100644 --- a/src/cpu/x86/vm/x86_64.ad +++ b/src/cpu/x86/vm/x86_64.ad @@ -4162,33 +4162,6 @@ encode %{ // done: %} - enc_class enc_membar_acquire - %{ - // [jk] not needed currently, if you enable this and it really - // emits code don't forget to the remove the "size(0)" line in - // membar_acquire() - // MacroAssembler masm(&cbuf); - // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore | - // Assembler::LoadLoad)); - %} - - enc_class enc_membar_release - %{ - // [jk] not needed currently, if you enable this and it really - // emits code don't forget to the remove the "size(0)" line in - // membar_release() - // MacroAssembler masm(&cbuf); - // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore | - // Assembler::StoreStore)); - %} - - enc_class enc_membar_volatile - %{ - MacroAssembler masm(&cbuf); - masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - %} - // Safepoint Poll. This polls the safepoint page, and causes an // exception if it is not readable. Unfortunately, it kills // RFLAGS in the process. @@ -7458,7 +7431,7 @@ instruct membar_acquire() ins_cost(0); size(0); - format %{ "MEMBAR-acquire" %} + format %{ "MEMBAR-acquire ! (empty encoding)" %} ins_encode(); ins_pipe(empty); %} @@ -7481,7 +7454,7 @@ instruct membar_release() ins_cost(0); size(0); - format %{ "MEMBAR-release" %} + format %{ "MEMBAR-release ! (empty encoding)" %} ins_encode(); ins_pipe(empty); %} @@ -7498,13 +7471,22 @@ instruct membar_release_lock() ins_pipe(empty); %} -instruct membar_volatile() -%{ +instruct membar_volatile(rFlagsReg cr) %{ match(MemBarVolatile); + effect(KILL cr); ins_cost(400); - format %{ "MEMBAR-volatile" %} - ins_encode(enc_membar_volatile); + format %{ + $$template + if (os::is_MP()) { + $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile" + } else { + $$emit$$"MEMBAR-volatile ! 
(empty encoding)" + } + %} + ins_encode %{ + __ membar(Assembler::StoreLoad); + %} ins_pipe(pipe_slow); %} diff --git a/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp b/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp index 4c74a6af6abe14ca58c871798e968a6c150687f2..865cd1ce0f13fad9cd62c539b72de8af411fb0cc 100644 --- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp @@ -29,13 +29,11 @@ static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); static jint (*atomic_add_func) (jint, volatile jint*); - static void (*fence_func) (); static jint atomic_xchg_bootstrap (jint, volatile jint*); static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); static jint atomic_add_bootstrap (jint, volatile jint*); - static void fence_bootstrap (); static void setup_fpu() {} diff --git a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp index 2b5f3ec0ac528903d7761c953d283e9ff4cdb81d..9777b0a131db2ce21020e89f063987e7455ed1f3 100644 --- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp +++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp @@ -44,11 +44,12 @@ inline void OrderAccess::release() { inline void OrderAccess::fence() { if (os::is_MP()) { + // always use locked addl since mfence is sometimes expensive #ifdef AMD64 - __asm__ __volatile__ ("mfence":::"memory"); + __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); #else __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); -#endif // AMD64 +#endif } } diff --git a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp index c80d89157e4e54927dee48dbc5abdd7600939d0d..5e27aefb9773fed6c31093d49f9f656a4b74e227 100644 --- a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp +++ b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp @@ -60,22 +60,10 @@ inline void OrderAccess::release() { dummy = 0; } -#if defined(COMPILER2) || defined(_LP64) - inline void OrderAccess::fence() { _OrderAccess_fence(); } -#else // defined(COMPILER2) || defined(_LP64) - -inline void OrderAccess::fence() { - if (os::is_MP()) { - (*os::fence_func)(); - } -} - -#endif // defined(COMPILER2) || defined(_LP64) - #endif // _GNU_SOURCE inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } diff --git a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp index 802934511af59eb4a4c0b2d8fb65f285d417b921..44b67e9d0352204398b63ebba2ef8fe3341299ae 100644 --- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp @@ -619,7 +619,6 @@ typedef jint xchg_func_t (jint, volatile jint*); typedef jint cmpxchg_func_t (jint, volatile jint*, jint); typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong); typedef jint add_func_t (jint, volatile jint*); -typedef void fence_func_t (); jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { // try to use the stub: @@ -681,25 +680,10 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { return (*dest) += add_value; } -void os::fence_bootstrap() { - // try to use the stub: - fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry()); - - if (func != NULL) { - 
diff --git a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp
index c80d89157e4e54927dee48dbc5abdd7600939d0d..5e27aefb9773fed6c31093d49f9f656a4b74e227 100644
--- a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp
+++ b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp
@@ -60,22 +60,10 @@ inline void OrderAccess::release() {
   dummy = 0;
 }
 
-#if defined(COMPILER2) || defined(_LP64)
-
 inline void OrderAccess::fence() {
   _OrderAccess_fence();
 }
 
-#else  // defined(COMPILER2) || defined(_LP64)
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-    (*os::fence_func)();
-  }
-}
-
-#endif // defined(COMPILER2) || defined(_LP64)
-
 #endif // _GNU_SOURCE
 
 inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
diff --git a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
index 802934511af59eb4a4c0b2d8fb65f285d417b921..44b67e9d0352204398b63ebba2ef8fe3341299ae 100644
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
@@ -619,7 +619,6 @@ typedef jint  xchg_func_t        (jint,  volatile jint*);
 typedef jint  cmpxchg_func_t     (jint,  volatile jint*, jint);
 typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
 typedef jint  add_func_t         (jint,  volatile jint*);
-typedef void  fence_func_t       ();
 
 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   // try to use the stub:
@@ -681,25 +680,10 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
   return (*dest) += add_value;
 }
 
-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;
 
 #endif // !_LP64 && !COMPILER2
 
diff --git a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp
index f522b038507229fc61d39e5f7697ed59856ea7ca..62fee83dd252797bc1be2e81f8cdbf11718a9e92 100644
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp
@@ -29,13 +29,11 @@
   static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*, jint);
   static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
   static jint  (*atomic_add_func)         (jint,  volatile jint*);
-  static void  (*fence_func)              ();
 
   static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
 
   static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*, jint);
   static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
   static jint  atomic_add_bootstrap         (jint,  volatile jint*);
-  static void  fence_bootstrap              ();
 
   static void setup_fpu() {}
diff --git a/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp b/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp
index ed8486f746f2a25633c31ec1ce4237099ef3a90f..bf4d97d21b4f435ee490bb8feb91782d07ef6b76 100644
--- a/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp
+++ b/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp
@@ -61,11 +61,8 @@ extern "C" {
 #endif // AMD64
   }
 
   inline void _OrderAccess_fence() {
-#ifdef AMD64
-    __asm__ __volatile__ ("mfence":::"memory");
-#else
+    // Always use locked addl since mfence is sometimes expensive
     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif // AMD64
   }
 }
diff --git a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
index e6b1edca5a463f6354c8110156d38643b9dea8da..59ed458b23ffad1a4cec69a4ad381706767f315c 100644
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
@@ -794,7 +794,6 @@ typedef jint  xchg_func_t        (jint,  volatile jint*);
 typedef jint  cmpxchg_func_t     (jint,  volatile jint*, jint);
 typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
 typedef jint  add_func_t         (jint,  volatile jint*);
-typedef void  fence_func_t       ();
 
 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   // try to use the stub:
@@ -856,25 +855,10 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
   return (*dest) += add_value;
 }
 
-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;
 
 extern "C" _solaris_raw_setup_fpu(address ptr);
 void os::setup_fpu() {
diff --git a/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp b/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp
index fd5707cbe37820d0acd619041596fa34a99237d7..3a02d11965d3993d3465d5a782c2f0a08bebece9 100644
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp
@@ -32,13 +32,11 @@
   static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*, jint);
   static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
   static jint  (*atomic_add_func)         (jint,  volatile jint*);
-  static void  (*fence_func)              ();
 
   static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
   static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*, jint);
   static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
   static jint  atomic_add_bootstrap         (jint,  volatile jint*);
-  static void  fence_bootstrap              ();
 
   static void setup_fpu();
 #endif // AMD64
diff --git a/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp b/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp
index b0a98bb0bab8f3ebedb6fb2b52e3c3252bfe964b..1e53ed1aaa1346e5416838503e50c240b6428e10 100644
--- a/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp
+++ b/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp
@@ -46,7 +46,7 @@ inline void OrderAccess::release() {
 
 inline void OrderAccess::fence() {
 #ifdef AMD64
-  (*os::fence_func)();
+  StubRoutines_fence();
 #else
   if (os::is_MP()) {
     __asm {
diff --git a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
index 27b6af946d687d40956f11a500f5c275cb115cc2..e322f5fd1e8dfcf2eba4bbe7f10aa72b8ed583e0 100644
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
@@ -196,7 +196,6 @@
 typedef jint     cmpxchg_func_t     (jint,     volatile jint*, jint);
 typedef jlong    cmpxchg_long_func_t(jlong,    volatile jlong*, jlong);
 typedef jint     add_func_t         (jint,     volatile jint*);
 typedef intptr_t add_ptr_func_t     (intptr_t, volatile intptr_t*);
-typedef void     fence_func_t       ();
 
 #ifdef AMD64
@@ -292,27 +291,11 @@ intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* des
   return (*dest) += add_value;
 }
 
-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;
 
 #endif // AMD64
 
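
The Windows split above exists because 64-bit MSVC has no inline assembler: the 32-bit branch of OrderAccess::fence() keeps its __asm spelling of the locked-add idiom, while the AMD64 branch now calls the shared StubRoutines_fence() helper introduced below. Roughly, the 32-bit form is the following; an illustrative sketch assuming 32-bit MSVC, mirroring the #else branch the patch leaves in place:

    // 32-bit MSVC spelling of the same StoreLoad barrier; the x64 compiler
    // rejects __asm blocks, hence the stub-based path on AMD64.
    inline void fence_ia32_msvc() {
      __asm {
        lock add dword ptr [esp], 0;
      }
    }
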
diff --git a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp
index d75781016778378c3911d4eca8e4fa9163a2261f..1e0c6b334b58c89228e07ff8f873b2512e9dd477 100644
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp
@@ -35,9 +35,6 @@
   static jint      (*atomic_add_func)     (jint,      volatile jint*);
   static intptr_t  (*atomic_add_ptr_func) (intptr_t,  volatile intptr_t*);
 
-  static void      (*fence_func)          ();
-
-
   static jint      atomic_xchg_bootstrap     (jint,      volatile jint*);
   static intptr_t  atomic_xchg_ptr_bootstrap (intptr_t,  volatile intptr_t*);
 
@@ -53,8 +50,6 @@
 #ifdef AMD64
   static jint      atomic_add_bootstrap      (jint,      volatile jint*);
   static intptr_t  atomic_add_ptr_bootstrap  (intptr_t,  volatile intptr_t*);
-
-  static void      fence_bootstrap           ();
 #endif // AMD64
 
   static void setup_fpu();
diff --git a/src/share/vm/includeDB_core b/src/share/vm/includeDB_core
index b75a1ab39a8391fbb8dd9a92d5f6ebd47c23a7c2..a88800f29f0caafe5a820ea13e02594da4c20e43 100644
--- a/src/share/vm/includeDB_core
+++ b/src/share/vm/includeDB_core
@@ -3154,6 +3154,8 @@ oopsHierarchy.cpp                       thread.hpp
 oopsHierarchy.cpp                       thread_<os_family>.inline.hpp
 
 orderAccess.cpp                         orderAccess.hpp
+orderAccess.cpp                         stubRoutines.hpp
+orderAccess.cpp                         thread.hpp
 
 orderAccess.hpp                         allocation.hpp
 orderAccess.hpp                         os.hpp
diff --git a/src/share/vm/runtime/orderAccess.cpp b/src/share/vm/runtime/orderAccess.cpp
index 392b5978126f0e663b0161d4811ceb17979f3741..1e66d6258f5b46c44c9fbe2964cc43a4aeb93dfb 100644
--- a/src/share/vm/runtime/orderAccess.cpp
+++ b/src/share/vm/runtime/orderAccess.cpp
@@ -26,3 +26,15 @@
 # include "incls/_orderAccess.cpp.incl"
 
 volatile intptr_t OrderAccess::dummy = 0;
+
+void OrderAccess::StubRoutines_fence() {
+  // Use a stub if it exists.  It may not exist during bootstrap, so do
+  // nothing in that case but assert if no fence code exists after threads have been created.
+  void (*func)() = CAST_TO_FN_PTR(void (*)(), StubRoutines::fence_entry());
+
+  if (func != NULL) {
+    (*func)();
+    return;
+  }
+  assert(Threads::number_of_threads() == 0, "for bootstrap only");
+}
diff --git a/src/share/vm/runtime/orderAccess.hpp b/src/share/vm/runtime/orderAccess.hpp
index c51a9229735a58fc7ecc5c00b1a5b5bd48c98672..d6b83466da5e6e565d241fe36f6de7a20b705b88 100644
--- a/src/share/vm/runtime/orderAccess.hpp
+++ b/src/share/vm/runtime/orderAccess.hpp
@@ -300,4 +300,10 @@ class OrderAccess : AllStatic {
   // In order to force a memory access, implementations may
   // need a volatile externally visible dummy variable.
   static volatile intptr_t dummy;
+
+ private:
+  // This is a helper that invokes the StubRoutines::fence_entry()
+  // routine if it exists.  It should only be used by platforms that
+  // don't have another way to do the inline assembly.
+  static void StubRoutines_fence();
 };
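
Unlike the removed os::fence_bootstrap() functions, the new helper never caches the stub pointer: it re-reads StubRoutines::fence_entry() on every call and falls back to a no-op while the VM is still single-threaded. In this change only the 64-bit Windows OrderAccess::fence() routes through it; the other x86 ports use the inline-assembly form directly. The pattern, reduced to a standalone sketch with simplified names, not the real HotSpot declarations:

    #include <cassert>
    #include <cstddef>

    typedef void (*fence_stub_t)();

    static fence_stub_t g_fence_entry  = NULL;  // stands in for StubRoutines::fence_entry()
    static int          g_thread_count = 1;     // stands in for Threads::number_of_threads()

    static void stub_routines_fence() {
      fence_stub_t func = g_fence_entry;
      if (func != NULL) {
        (*func)();  // the generated stub: a StoreLoad barrier followed by ret
        return;
      }
      // No stub yet: legal only during bootstrap, while a single thread runs
      // and no cross-thread ordering is required.
      assert(g_thread_count == 1 && "for bootstrap only");
    }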