diff --git a/make/jprt.properties b/make/jprt.properties
index 9109ded6f9884617aeaa71abb648d4204c9cf380..a0548f2b37f816233bc5e32c5ccce059a6799eb8 100644
--- a/make/jprt.properties
+++ b/make/jprt.properties
@@ -329,9 +329,81 @@ jprt.my.linux.i586.test.targets.embedded = \
 # The complete list of test targets for jprt
 # Note: no PPC or ARM tests at this stage
+jprt.my.linux.armvfpsflt.test.targets.embedded = \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-scimark, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_default, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_SerialGC, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParallelGC, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParNewGC, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_CMS, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_G1, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParOldGC, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_default, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_SerialGC, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_ParallelGC, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_ParNewGC, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_CMS, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_G1, \
+    linux_armvfpsflt_2.6-productEmb-{c1|c2}-GCOld_ParOldGC, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_default, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c2-jbb_default_nontiered, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParallelGC, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_CMS, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_G1, \
+    linux_armvfpsflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParOldGC
+
+# QEMU Emulators for ARM VFP HFLT
+jprt.my.linux.armvfphflt.test.targets.embedded = \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-scimark, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_default, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_SerialGC, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParallelGC, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParNewGC, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_CMS, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_G1, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParOldGC, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_default, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_SerialGC, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_ParallelGC, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_ParNewGC, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_CMS, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_G1, \
+    linux_armvfphflt_2.6-productEmb-{c1|c2}-GCOld_ParOldGC, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_default, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c2-jbb_default_nontiered, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParallelGC, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_CMS, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_G1, \
+    linux_armvfphflt_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParOldGC
+
+jprt.my.linux.ppc.test.targets.embedded = \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-scimark, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_default, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_SerialGC, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParallelGC, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParNewGC, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_CMS, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_G1, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-{c1|c2}-GCBasher_ParOldGC, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_default, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_SerialGC, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_ParallelGC, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_ParNewGC, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_CMS, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_G1, \
+    linux_ppc_2.6-productEmb-{c1|c2}-GCOld_ParOldGC, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c1-jbb_default, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c2-jbb_default_nontiered, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParallelGC, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c1-jbb_CMS, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c1-jbb_G1, \
+    linux_ppc_2.6-{productEmb|fastdebugEmb}-c1-jbb_ParOldGC
 
 jprt.test.targets.standard = \
     ${jprt.my.linux.i586.test.targets.embedded}, \
+    ${jprt.my.linux.armvfpsflt.test.targets.embedded}, \
+    ${jprt.my.linux.armvfphflt.test.targets.embedded}, \
+    ${jprt.my.linux.ppc.test.targets.embedded}, \
     ${jprt.my.solaris.sparcv9.test.targets}, \
     ${jprt.my.solaris.x64.test.targets}, \
     ${jprt.my.linux.i586.test.targets}, \
diff --git a/src/cpu/sparc/vm/sparc.ad b/src/cpu/sparc/vm/sparc.ad
index a84af2e8ee82e0663fd9c975821453f6d0e5c922..8dfe76b5a239c0d4b0219c7e18c7f10c3b7b7547 100644
--- a/src/cpu/sparc/vm/sparc.ad
+++ b/src/cpu/sparc/vm/sparc.ad
@@ -6651,6 +6651,7 @@ instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
 
 instruct membar_acquire() %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(4*MEMORY_REF_COST);
 
   size(0);
@@ -6671,6 +6672,7 @@ instruct membar_acquire_lock() %{
 
 instruct membar_release() %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(4*MEMORY_REF_COST);
 
   size(0);
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index 00a9861df89a32e6f84e3f9e63c2abd9dde5f169..e9d34e948cf46527a7d30973274b930dba0b4975 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -7096,6 +7096,7 @@ instruct storeSSL(stackSlotL dst, eRegL src) %{
 
 instruct membar_acquire() %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(400);
 
   size(0);
@@ -7116,6 +7117,7 @@ instruct membar_acquire_lock() %{
 
 instruct membar_release() %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(400);
 
   size(0);
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index b94f5a5422ccfcd3e8d77cb6e0b72b3079b7b628..12280ddb1571b8a04caab5c4219aa1111d232179 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -6345,6 +6345,7 @@ instruct popCountL_mem(rRegI dst, memory mem, rFlagsReg cr) %{
 instruct membar_acquire()
 %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(0);
 
   size(0);
@@ -6367,6 +6368,7 @@ instruct membar_acquire_lock()
 instruct membar_release()
 %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(0);
 
   size(0);
diff --git a/src/share/vm/adlc/formssel.cpp b/src/share/vm/adlc/formssel.cpp
index b20835815e391b73cc34b3d7f4c14ff6ccbde788..904b888c2bc15dff1b1d5091e274c99e279ec3d9 100644
--- a/src/share/vm/adlc/formssel.cpp
+++ b/src/share/vm/adlc/formssel.cpp
@@ -648,6 +648,8 @@ bool InstructForm::is_wide_memory_kill(FormDict &globals) const {
   if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
   if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
   if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"StoreFence") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"LoadFence") == 0 ) return true;
 
   return false;
 }
@@ -4054,13 +4056,15 @@ bool MatchRule::is_ideal_fastlock() const {
 bool MatchRule::is_ideal_membar() const {
   if( !_opType ) return false;
   return
-    !strcmp(_opType,"MemBarAcquire" ) ||
-    !strcmp(_opType,"MemBarRelease" ) ||
+    !strcmp(_opType,"MemBarAcquire") ||
+    !strcmp(_opType,"MemBarRelease") ||
     !strcmp(_opType,"MemBarAcquireLock") ||
     !strcmp(_opType,"MemBarReleaseLock") ||
-    !strcmp(_opType,"MemBarVolatile" ) ||
-    !strcmp(_opType,"MemBarCPUOrder" ) ||
-    !strcmp(_opType,"MemBarStoreStore" );
+    !strcmp(_opType,"LoadFence" ) ||
+    !strcmp(_opType,"StoreFence") ||
+    !strcmp(_opType,"MemBarVolatile") ||
+    !strcmp(_opType,"MemBarCPUOrder") ||
+    !strcmp(_opType,"MemBarStoreStore");
 }
 
 bool MatchRule::is_ideal_loadPC() const {
diff --git a/src/share/vm/opto/classes.hpp b/src/share/vm/opto/classes.hpp
index 002d2db636d7abc80945229060d9b1e316484d1f..79aa1b8b6286b95b083582ef0f418793c3c706ed 100644
--- a/src/share/vm/opto/classes.hpp
+++ b/src/share/vm/opto/classes.hpp
@@ -175,9 +175,11 @@ macro(MathExactI)
 macro(MathExactL)
 macro(MaxI)
 macro(MemBarAcquire)
+macro(LoadFence)
 macro(MemBarAcquireLock)
 macro(MemBarCPUOrder)
 macro(MemBarRelease)
+macro(StoreFence)
 macro(MemBarReleaseLock)
 macro(MemBarVolatile)
 macro(MemBarStoreStore)
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 14f7015bd03a0cd2ea965781126607a37141fb83..8ad2fb7cff4e94719e93670b9bf24cbe65c515f9 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -3105,10 +3105,10 @@ bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
   insert_mem_bar(Op_MemBarCPUOrder);
   switch(id) {
     case vmIntrinsics::_loadFence:
-      insert_mem_bar(Op_MemBarAcquire);
+      insert_mem_bar(Op_LoadFence);
       return true;
     case vmIntrinsics::_storeFence:
-      insert_mem_bar(Op_MemBarRelease);
+      insert_mem_bar(Op_StoreFence);
       return true;
     case vmIntrinsics::_fullFence:
       insert_mem_bar(Op_MemBarVolatile);
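For context, not part of the patch: the intrinsics retargeted in the library_call.cpp hunk above are sun.misc.Unsafe.loadFence() and storeFence(). As the new node comments in memnode.hpp further down put it, these fences are inserted independent of any particular load or store, so they now get their own LoadFence/StoreFence ideal nodes instead of reusing MemBarAcquire/MemBarRelease; on the TSO back ends touched here they still match the existing no-op membar_acquire/membar_release instructions, and the adlc change above marks both new nodes as wide memory kills, which keeps plain accesses from being cached across a fence. A minimal seqlock-style sketch of the kind of code that relies on these fences follows; the class, its fields, and the reflective Unsafe lookup are invented for illustration.

import java.lang.reflect.Field;

import sun.misc.Unsafe;

// Illustrative only: not part of the patch.
class SeqLockSketch {
    private static final Unsafe U;
    static {
        try {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            U = (Unsafe) f.get(null);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    private int seq;       // even = stable, odd = write in progress
    private long x, y;     // plain (non-volatile) data fields

    void write(long a, long b) {
        seq = seq + 1;     // mark write in progress (odd)
        U.storeFence();    // the seq store may not reorder with the data stores below
        x = a;
        y = b;
        U.storeFence();    // data stores complete before seq is marked stable again
        seq = seq + 1;     // stable (even)
    }

    long[] read() {
        while (true) {
            int s = seq;
            U.loadFence(); // data loads below may not float above the seq load
            long a = x;
            long b = y;
            U.loadFence(); // the re-read of seq may not float above the data loads
            if ((s & 1) == 0 && s == seq) {
                return new long[] { a, b };
            }
        }
    }
}

Neither fence here is attached to one specific memory access, which is exactly the case the standalone LoadFence/StoreFence nodes are meant to model.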
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 87eb22446975352c98b04901b6b6fec623893154..708711e2a64b2436513c2562e48bcac01fdc7ff7 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -2333,7 +2333,7 @@ void Matcher::validate_null_checks( ) {
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire, "");
+  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
   const MemBarNode* membar = vmb->as_MemBar();
 
   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
@@ -2378,7 +2378,7 @@ bool Matcher::post_store_load_barrier(const Node* vmb) {
     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire) {
+      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index db956862482029fa1de1480bc02e5a9697b8bb55..939827b89b83e9d7cfdce740fa2380deb083cb74 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -1002,9 +1002,13 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
       // a synchronized region.
       while (current->is_Proj()) {
         int opc = current->in(0)->Opcode();
-        if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
-            opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
-            opc == Op_MemBarReleaseLock) {
+        if ((final && (opc == Op_MemBarAcquire ||
+                       opc == Op_MemBarAcquireLock ||
+                       opc == Op_LoadFence)) ||
+            opc == Op_MemBarRelease ||
+            opc == Op_StoreFence ||
+            opc == Op_MemBarReleaseLock ||
+            opc == Op_MemBarCPUOrder) {
           Node* mem = current->in(0)->in(TypeFunc::Memory);
           if (mem->is_MergeMem()) {
             MergeMemNode* merge = mem->as_MergeMem();
@@ -2973,15 +2977,17 @@ uint MemBarNode::cmp( const Node &n ) const {
 //------------------------------make-------------------------------------------
 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
   switch (opcode) {
-  case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
-  case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
-  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
-  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
-  case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
-  case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
-  case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
-  case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
-  default:                   ShouldNotReachHere(); return NULL;
+  case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
+  case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
+  case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
+  case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
+  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
+  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
+  case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
+  case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
+  case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
+  case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
+  default:                   ShouldNotReachHere(); return NULL;
   }
 }
 
diff --git a/src/share/vm/opto/memnode.hpp b/src/share/vm/opto/memnode.hpp
index 750a79af8a2c6562d4099b21e2d9654cc5937f7a..7c92e1c5a7cd318bdd90e07133e02982dc0a4a6f 100644
--- a/src/share/vm/opto/memnode.hpp
+++ b/src/share/vm/opto/memnode.hpp
@@ -994,6 +994,17 @@ public:
   virtual int Opcode() const;
 };
 
+// "Acquire" - no following ref can move before (but earlier refs can
+// follow, like an early Load stalled in cache). Requires multi-cpu
+// visibility. Inserted independent of any load, as required
+// for intrinsic sun.misc.Unsafe.loadFence().
+class LoadFenceNode: public MemBarNode {
+public:
+  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
 // "Release" - no earlier ref can move after (but later refs can move
 // up, like a speculative pipelined cache-hitting Load). Requires
 // multi-cpu visibility. Inserted before a volatile store.
@@ -1004,6 +1015,17 @@ public:
   virtual int Opcode() const;
 };
 
+// "Release" - no earlier ref can move after (but later refs can move
+// up, like a speculative pipelined cache-hitting Load). Requires
+// multi-cpu visibility. Inserted independent of any store, as required
+// for intrinsic sun.misc.Unsafe.storeFence().
+class StoreFenceNode: public MemBarNode {
+public:
+  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
 // "Acquire" - no following ref can move before (but earlier refs can
 // follow, like an early Load stalled in cache). Requires multi-cpu
 // visibility. Inserted after a FastLock.
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index 1867ba513243234bfd69745123995efaa84e3d86..b67187c6e3d99785d0b3478e56e751b210ea8653 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -1820,6 +1820,8 @@ typedef BinaryTreeDictionary MetablockTreeDictionary;
   declare_c2_type(MemBarNode, MultiNode)              \
   declare_c2_type(MemBarAcquireNode, MemBarNode)      \
   declare_c2_type(MemBarReleaseNode, MemBarNode)      \
+  declare_c2_type(LoadFenceNode, MemBarNode)          \
+  declare_c2_type(StoreFenceNode, MemBarNode)         \
   declare_c2_type(MemBarVolatileNode, MemBarNode)     \
   declare_c2_type(MemBarCPUOrderNode, MemBarNode)     \
   declare_c2_type(InitializeNode, MemBarNode)         \
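For context, not part of the patch: a hypothetical smoke test for the retargeted intrinsic path. Once C2 compiles a hot loop containing these calls, LibraryCallKit::inline_unsafe_fence() (changed in library_call.cpp above) expands them into the new StoreFence and LoadFence ideal nodes, each preceded by a MemBarCPUOrder. The class name, loop bound, and the flags mentioned in the comments are suggestions only, not requirements of the change.

import java.lang.reflect.Field;

import sun.misc.Unsafe;

// Hypothetical smoke test, e.g.: java -Xbatch -XX:-TieredCompilation FenceSmokeTest
// (a debug build run with -XX:+PrintIdeal shows the new nodes in the printed IR).
public class FenceSmokeTest {
    static volatile long sink;

    public static void main(String[] args) throws Exception {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe u = (Unsafe) f.get(null);

        long acc = 0;
        for (int i = 0; i < 2_000_000; i++) {
            acc += i;
            u.storeFence();  // expands to a StoreFence ideal node after this change
            acc ^= sink;
            u.loadFence();   // expands to a LoadFence ideal node after this change
        }
        System.out.println(acc); // keep acc live so the loop is not removed
    }
}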