Commit f9bc752b authored by: acorn

6988353: refactor contended sync subsystem

Summary: reduce complexity by factoring synchronizer.cpp
Reviewed-by: dholmes, never, coleenp
Parent 044288ff
/*
* Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
...@@ -301,6 +301,7 @@ c1_MacroAssembler.hpp assembler.hpp
c1_MacroAssembler.hpp assembler_<arch>.inline.hpp
c1_MacroAssembler_<arch>.cpp arrayOop.hpp
c1_MacroAssembler_<arch>.cpp basicLock.hpp
c1_MacroAssembler_<arch>.cpp biasedLocking.hpp
c1_MacroAssembler_<arch>.cpp c1_MacroAssembler.hpp
c1_MacroAssembler_<arch>.cpp c1_Runtime1.hpp
...@@ -309,7 +310,6 @@ c1_MacroAssembler_<arch>.cpp interpreter.hpp
c1_MacroAssembler_<arch>.cpp markOop.hpp
c1_MacroAssembler_<arch>.cpp os.hpp
c1_MacroAssembler_<arch>.cpp stubRoutines.hpp
c1_MacroAssembler_<arch>.cpp synchronizer.hpp
c1_MacroAssembler_<arch>.cpp systemDictionary.hpp
c1_MacroAssembler_<arch>.hpp generate_platform_dependent_include
...
...@@ -300,10 +300,17 @@ barrierSet.hpp oopsHierarchy.hpp
barrierSet.inline.hpp barrierSet.hpp
barrierSet.inline.hpp cardTableModRefBS.hpp
basicLock.cpp basicLock.hpp
basicLock.cpp synchronizer.hpp
basicLock.hpp handles.hpp
basicLock.hpp markOop.hpp
basicLock.hpp top.hpp
biasedLocking.cpp basicLock.hpp
biasedLocking.cpp biasedLocking.hpp
biasedLocking.cpp klass.inline.hpp
biasedLocking.cpp markOop.hpp
biasedLocking.cpp synchronizer.hpp
biasedLocking.cpp task.hpp
biasedLocking.cpp vframe.hpp
biasedLocking.cpp vmThread.hpp
...@@ -404,13 +411,13 @@ bytecodeInterpreter_<arch>.cpp vframeArray.hpp
bytecodeInterpreterWithChecks.cpp bytecodeInterpreter.cpp
bytecodeInterpreter.hpp allocation.hpp
bytecodeInterpreter.hpp basicLock.hpp
bytecodeInterpreter.hpp bytes_<arch>.hpp
bytecodeInterpreter.hpp frame.hpp
bytecodeInterpreter.hpp globalDefinitions.hpp
bytecodeInterpreter.hpp globals.hpp
bytecodeInterpreter.hpp methodDataOop.hpp
bytecodeInterpreter.hpp methodOop.hpp
bytecodeInterpreter.hpp synchronizer.hpp
bytecodeInterpreter.inline.hpp bytecodeInterpreter.hpp
bytecodeInterpreter.inline.hpp stubRoutines.hpp
...@@ -1667,10 +1674,10 @@ frame.cpp stubRoutines.hpp
frame.cpp universe.inline.hpp
frame.hpp assembler.hpp
frame.hpp basicLock.hpp
frame.hpp methodOop.hpp
frame.hpp monitorChunk.hpp
frame.hpp registerMap.hpp
frame.hpp synchronizer.hpp
frame.hpp top.hpp
frame.inline.hpp bytecodeInterpreter.hpp
...@@ -2120,6 +2127,7 @@ interfaceSupport.hpp vmThread.hpp
interfaceSupport_<os_family>.hpp generate_platform_dependent_include
interp_masm_<arch_model>.cpp arrayOop.hpp
interp_masm_<arch_model>.cpp basicLock.hpp
interp_masm_<arch_model>.cpp biasedLocking.hpp
interp_masm_<arch_model>.cpp interp_masm_<arch_model>.hpp
interp_masm_<arch_model>.cpp interpreterRuntime.hpp
...@@ -2131,7 +2139,6 @@ interp_masm_<arch_model>.cpp markOop.hpp
interp_masm_<arch_model>.cpp methodDataOop.hpp
interp_masm_<arch_model>.cpp methodOop.hpp
interp_masm_<arch_model>.cpp sharedRuntime.hpp
interp_masm_<arch_model>.cpp synchronizer.hpp
interp_masm_<arch_model>.cpp thread_<os_family>.inline.hpp
interp_masm_<arch_model>.hpp assembler_<arch>.inline.hpp
...@@ -3094,25 +3101,26 @@ objArrayOop.cpp oop.inline.hpp
objArrayOop.hpp arrayOop.hpp
objectMonitor_<os_family>.cpp dtrace.hpp
objectMonitor_<os_family>.cpp interfaceSupport.hpp
objectMonitor_<os_family>.cpp objectMonitor.hpp
objectMonitor_<os_family>.cpp objectMonitor.inline.hpp
objectMonitor_<os_family>.cpp oop.inline.hpp
objectMonitor_<os_family>.cpp osThread.hpp
objectMonitor_<os_family>.cpp os_<os_family>.inline.hpp
objectMonitor_<os_family>.cpp threadService.hpp
objectMonitor_<os_family>.cpp thread_<os_family>.inline.hpp
objectMonitor_<os_family>.cpp vmSymbols.hpp
objectMonitor_<os_family>.hpp generate_platform_dependent_include
objectMonitor_<os_family>.hpp os_<os_family>.inline.hpp
objectMonitor_<os_family>.hpp thread_<os_family>.inline.hpp
objectMonitor_<os_family>.hpp top.hpp
objectMonitor_<os_family>.inline.hpp generate_platform_dependent_include
objectMonitor.cpp dtrace.hpp
objectMonitor.cpp handles.inline.hpp
objectMonitor.cpp interfaceSupport.hpp
objectMonitor.cpp markOop.hpp
objectMonitor.cpp mutexLocker.hpp
objectMonitor.cpp objectMonitor.hpp
objectMonitor.cpp objectMonitor.inline.hpp
objectMonitor.cpp oop.inline.hpp
objectMonitor.cpp osThread.hpp
objectMonitor.cpp os_<os_family>.inline.hpp
objectMonitor.cpp preserveException.hpp
objectMonitor.cpp resourceArea.hpp
objectMonitor.cpp stubRoutines.hpp
objectMonitor.cpp thread.hpp
objectMonitor.cpp thread_<os_family>.inline.hpp
objectMonitor.cpp threadService.hpp
objectMonitor.cpp vmSymbols.hpp
objectMonitor.hpp os.hpp
objectMonitor.hpp perfData.hpp
oop.cpp copy.hpp
oop.cpp handles.inline.hpp
...@@ -3329,7 +3337,6 @@ os_<os_family>.cpp mutex_<os_family>.inline.hpp
os_<os_family>.cpp nativeInst_<arch>.hpp
os_<os_family>.cpp no_precompiled_headers
os_<os_family>.cpp objectMonitor.hpp
os_<os_family>.cpp objectMonitor.inline.hpp
os_<os_family>.cpp oop.inline.hpp
os_<os_family>.cpp osThread.hpp
os_<os_family>.cpp os_share_<os_family>.hpp
...@@ -3389,6 +3396,12 @@ ostream.cpp xmlstream.hpp
ostream.hpp allocation.hpp
ostream.hpp timer.hpp
// include thread.hpp to prevent cyclic includes
park.cpp thread.hpp
park.hpp debug.hpp
park.hpp globalDefinitions.hpp
pcDesc.cpp debugInfoRec.hpp
pcDesc.cpp nmethod.hpp
pcDesc.cpp pcDesc.hpp
...@@ -4063,10 +4076,10 @@ synchronizer.cpp preserveException.hpp
synchronizer.cpp resourceArea.hpp
synchronizer.cpp stubRoutines.hpp
synchronizer.cpp synchronizer.hpp
synchronizer.cpp threadService.hpp
synchronizer.cpp thread_<os_family>.inline.hpp
synchronizer.cpp vmSymbols.hpp
synchronizer.hpp basicLock.hpp
synchronizer.hpp handles.hpp
synchronizer.hpp markOop.hpp
synchronizer.hpp perfData.hpp
...@@ -4238,7 +4251,6 @@ thread.cpp memprofiler.hpp
thread.cpp mutexLocker.hpp
thread.cpp objArrayOop.hpp
thread.cpp objectMonitor.hpp
thread.cpp objectMonitor.inline.hpp
thread.cpp oop.inline.hpp
thread.cpp oopFactory.hpp
thread.cpp osThread.hpp
...@@ -4276,6 +4288,7 @@ thread.hpp mutexLocker.hpp
thread.hpp oop.hpp
thread.hpp os.hpp
thread.hpp osThread.hpp
thread.hpp park.hpp
thread.hpp safepoint.hpp
thread.hpp stubRoutines.hpp
thread.hpp threadLocalAllocBuffer.hpp
...@@ -4587,6 +4600,7 @@ vframeArray.hpp frame.inline.hpp
vframeArray.hpp growableArray.hpp
vframeArray.hpp monitorChunk.hpp
vframe_hp.cpp basicLock.hpp
vframe_hp.cpp codeCache.hpp
vframe_hp.cpp debugInfoRec.hpp
vframe_hp.cpp handles.inline.hpp
...@@ -4600,7 +4614,6 @@ vframe_hp.cpp pcDesc.hpp
vframe_hp.cpp scopeDesc.hpp
vframe_hp.cpp signature.hpp
vframe_hp.cpp stubRoutines.hpp
vframe_hp.cpp synchronizer.hpp
vframe_hp.cpp vframeArray.hpp
vframe_hp.cpp vframe_hp.hpp
...@@ -4752,6 +4765,7 @@ workgroup.cpp os.hpp
workgroup.cpp workgroup.hpp
workgroup.hpp taskqueue.hpp
workgroup.hpp thread_<os_family>.inline.hpp
xmlstream.cpp allocation.hpp
...
...@@ -184,6 +184,13 @@ jvmtiImpl.hpp stackValueCollection.hpp
jvmtiImpl.hpp systemDictionary.hpp
jvmtiImpl.hpp vm_operations.hpp
jvmtiRawMonitor.cpp interfaceSupport.hpp
jvmtiRawMonitor.cpp jvmtiRawMonitor.hpp
jvmtiRawMonitor.cpp thread.hpp
jvmtiRawMonitor.hpp growableArray.hpp
jvmtiRawMonitor.hpp objectMonitor.hpp
jvmtiTagMap.cpp biasedLocking.hpp
jvmtiTagMap.cpp javaCalls.hpp
jvmtiTagMap.cpp jniHandles.hpp
...
...@@ -35,6 +35,7 @@ jvmtiClassFileReconstituter.hpp jvmtiEnv.hpp
// jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
jvmtiEnter.cpp jvmtiEnter.hpp
jvmtiEnter.cpp jvmtiRawMonitor.hpp
jvmtiEnter.cpp jvmtiUtil.hpp
jvmtiEnter.hpp interfaceSupport.hpp
...@@ -44,6 +45,7 @@ jvmtiEnter.hpp resourceArea.hpp
jvmtiEnter.hpp systemDictionary.hpp
jvmtiEnterTrace.cpp jvmtiEnter.hpp
jvmtiEnterTrace.cpp jvmtiRawMonitor.hpp
jvmtiEnterTrace.cpp jvmtiUtil.hpp
jvmtiEnv.cpp arguments.hpp
...@@ -66,11 +68,11 @@ jvmtiEnv.cpp jvmtiExtensions.hpp
jvmtiEnv.cpp jvmtiGetLoadedClasses.hpp
jvmtiEnv.cpp jvmtiImpl.hpp
jvmtiEnv.cpp jvmtiManageCapabilities.hpp
jvmtiEnv.cpp jvmtiRawMonitor.hpp
jvmtiEnv.cpp jvmtiRedefineClasses.hpp
jvmtiEnv.cpp jvmtiTagMap.hpp
jvmtiEnv.cpp jvmtiThreadState.inline.hpp
jvmtiEnv.cpp jvmtiUtil.hpp
jvmtiEnv.cpp objectMonitor.inline.hpp
jvmtiEnv.cpp osThread.hpp
jvmtiEnv.cpp preserveException.hpp
jvmtiEnv.cpp reflectionUtils.hpp
...@@ -178,11 +180,13 @@ jvmtiExport.cpp jvmtiEventController.inline.hpp
jvmtiExport.cpp jvmtiExport.hpp
jvmtiExport.cpp jvmtiImpl.hpp
jvmtiExport.cpp jvmtiManageCapabilities.hpp
jvmtiExport.cpp jvmtiRawMonitor.hpp
jvmtiExport.cpp jvmtiTagMap.hpp
jvmtiExport.cpp jvmtiThreadState.inline.hpp
jvmtiExport.cpp nmethod.hpp
jvmtiExport.cpp objArrayKlass.hpp
jvmtiExport.cpp objArrayOop.hpp
jvmtiExport.cpp objectMonitor.hpp
jvmtiExport.cpp objectMonitor.inline.hpp
jvmtiExport.cpp pcDesc.hpp
jvmtiExport.cpp resourceArea.hpp
...@@ -210,6 +214,8 @@ jvmtiManageCapabilities.cpp jvmtiManageCapabilities.hpp
jvmtiManageCapabilities.hpp allocation.hpp
jvmtiManageCapabilities.hpp jvmti.h
// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
jvmtiRedefineClasses.cpp bitMap.inline.hpp
jvmtiRedefineClasses.cpp codeCache.hpp
jvmtiRedefineClasses.cpp deoptimization.hpp
...
...@@ -25,26 +25,6 @@
# include "incls/_precompiled.incl"
# include "incls/_jvmtiImpl.cpp.incl"
GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
void JvmtiPendingMonitors::transition_raw_monitors() {
assert((Threads::number_of_threads()==1),
"Java thread has not created yet or more than one java thread \
is running. Raw monitor transition will not work");
JavaThread *current_java_thread = JavaThread::current();
assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
{
ThreadBlockInVM __tbivm(current_java_thread);
for(int i=0; i< count(); i++) {
JvmtiRawMonitor *rmonitor = monitors()->at(i);
int r = rmonitor->raw_enter(current_java_thread);
assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
}
}
// pending monitors are converted to real monitor so delete them all.
dispose();
}
//
// class JvmtiAgentThread
//
...@@ -216,57 +196,6 @@ void GrowableCache::gc_epilogue() {
}
}
//
// class JvmtiRawMonitor
//
JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
#ifdef ASSERT
_name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
#else
_name = NULL;
#endif
_magic = JVMTI_RM_MAGIC;
}
JvmtiRawMonitor::~JvmtiRawMonitor() {
#ifdef ASSERT
FreeHeap(_name);
#endif
_magic = 0;
}
bool
JvmtiRawMonitor::is_valid() {
int value = 0;
// This object might not be a JvmtiRawMonitor so we can't assume
// the _magic field is properly aligned. Get the value in a safe
// way and then check against JVMTI_RM_MAGIC.
switch (sizeof(_magic)) {
case 2:
value = Bytes::get_native_u2((address)&_magic);
break;
case 4:
value = Bytes::get_native_u4((address)&_magic);
break;
case 8:
value = Bytes::get_native_u8((address)&_magic);
break;
default:
guarantee(false, "_magic field is an unexpected size");
}
return value == JVMTI_RM_MAGIC;
}
//
// class JvmtiBreakpoint
//
...
...@@ -26,7 +26,6 @@
// Forward Declarations
//
class JvmtiRawMonitor;
class JvmtiBreakpoint;
class JvmtiBreakpoints;
...@@ -327,76 +326,6 @@ bool JvmtiCurrentBreakpoints::is_breakpoint(address bcp) {
return false;
}
///////////////////////////////////////////////////////////////
//
// class JvmtiRawMonitor
//
// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
//
// Wrapper for ObjectMonitor class that saves the Monitor's name
//
class JvmtiRawMonitor : public ObjectMonitor {
private:
int _magic;
char * _name;
// JVMTI_RM_MAGIC is set in constructor and unset in destructor.
enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
public:
JvmtiRawMonitor(const char *name);
~JvmtiRawMonitor();
int magic() { return _magic; }
const char *get_name() { return _name; }
bool is_valid();
};
// Onload pending raw monitors
// Class is used to cache onload or onstart monitor enter
// which will transition into real monitor when
// VM is fully initialized.
class JvmtiPendingMonitors : public AllStatic {
private:
static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
static void dispose() {
delete monitors();
}
public:
static void enter(JvmtiRawMonitor *monitor) {
monitors()->append(monitor);
}
static int count() {
return monitors()->length();
}
static void destroy(JvmtiRawMonitor *monitor) {
while (monitors()->contains(monitor)) {
monitors()->remove(monitor);
}
}
// Return false if monitor is not found in the list.
static bool exit(JvmtiRawMonitor *monitor) {
if (monitors()->contains(monitor)) {
monitors()->remove(monitor);
return true;
} else {
return false;
}
}
static void transition_raw_monitors();
};
///////////////////////////////////////////////////////////////
// The get/set local operations must only be done by the VM thread
// because the interpreter version needs to access oop maps, which can
...
/*
* Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_jvmtiRawMonitor.cpp.incl"
GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
void JvmtiPendingMonitors::transition_raw_monitors() {
assert((Threads::number_of_threads()==1),
"Java thread has not created yet or more than one java thread \
is running. Raw monitor transition will not work");
JavaThread *current_java_thread = JavaThread::current();
assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
{
ThreadBlockInVM __tbivm(current_java_thread);
for(int i=0; i< count(); i++) {
JvmtiRawMonitor *rmonitor = monitors()->at(i);
int r = rmonitor->raw_enter(current_java_thread);
assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
}
}
// pending monitors are converted to real monitor so delete them all.
dispose();
}
//
// class JvmtiRawMonitor
//
JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
#ifdef ASSERT
_name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
#else
_name = NULL;
#endif
_magic = JVMTI_RM_MAGIC;
}
JvmtiRawMonitor::~JvmtiRawMonitor() {
#ifdef ASSERT
FreeHeap(_name);
#endif
_magic = 0;
}
bool
JvmtiRawMonitor::is_valid() {
int value = 0;
// This object might not be a JvmtiRawMonitor so we can't assume
// the _magic field is properly aligned. Get the value in a safe
// way and then check against JVMTI_RM_MAGIC.
switch (sizeof(_magic)) {
case 2:
value = Bytes::get_native_u2((address)&_magic);
break;
case 4:
value = Bytes::get_native_u4((address)&_magic);
break;
case 8:
value = Bytes::get_native_u8((address)&_magic);
break;
default:
guarantee(false, "_magic field is an unexpected size");
}
return value == JVMTI_RM_MAGIC;
}
// -------------------------------------------------------------------------
// The raw monitor subsystem is entirely distinct from normal
// java-synchronization or jni-synchronization. raw monitors are not
// associated with objects. They can be implemented in any manner
// that makes sense. The original implementors decided to piggy-back
// the raw-monitor implementation on the existing Java objectMonitor mechanism.
// This flaw needs to be fixed. We should reimplement raw monitors as sui generis.
// Specifically, we should not implement raw monitors via java monitors.
// Time permitting, we should disentangle and deconvolve the two implementations
// and move the resulting raw monitor implementation over to the JVMTI directories.
// Ideally, the raw monitor implementation would be built on top of
// park-unpark and nothing else.
//
// raw monitors are used mainly by JVMTI
// The raw monitor implementation borrows the ObjectMonitor structure,
// but the operators are degenerate and extremely simple.
//
// Mixed use of a single objectMonitor instance -- as both a raw monitor
// and a normal java monitor -- is not permissible.
//
// Note that we use the single RawMonitor_lock to protect queue operations for
// _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
// is deprecated and rare, this is not of concern. The RawMonitor_lock can not
// be held indefinitely. The critical sections must be short and bounded.
//
// -------------------------------------------------------------------------
int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
for (;;) {
if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
return OS_OK ;
}
ObjectWaiter Node (Self) ;
Self->_ParkEvent->reset() ; // strictly optional
Node.TState = ObjectWaiter::TS_ENTER ;
RawMonitor_lock->lock_without_safepoint_check() ;
Node._next = _EntryList ;
_EntryList = &Node ;
OrderAccess::fence() ;
if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
_EntryList = Node._next ;
RawMonitor_lock->unlock() ;
return OS_OK ;
}
RawMonitor_lock->unlock() ;
while (Node.TState == ObjectWaiter::TS_ENTER) {
Self->_ParkEvent->park() ;
}
}
}
int JvmtiRawMonitor::SimpleExit (Thread * Self) {
guarantee (_owner == Self, "invariant") ;
OrderAccess::release_store_ptr (&_owner, NULL) ;
OrderAccess::fence() ;
if (_EntryList == NULL) return OS_OK ;
ObjectWaiter * w ;
RawMonitor_lock->lock_without_safepoint_check() ;
w = _EntryList ;
if (w != NULL) {
_EntryList = w->_next ;
}
RawMonitor_lock->unlock() ;
if (w != NULL) {
guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
ParkEvent * ev = w->_event ;
w->TState = ObjectWaiter::TS_RUN ;
OrderAccess::fence() ;
ev->unpark() ;
}
return OS_OK ;
}
int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
guarantee (_owner == Self , "invariant") ;
guarantee (_recursions == 0, "invariant") ;
ObjectWaiter Node (Self) ;
Node._notified = 0 ;
Node.TState = ObjectWaiter::TS_WAIT ;
RawMonitor_lock->lock_without_safepoint_check() ;
Node._next = _WaitSet ;
_WaitSet = &Node ;
RawMonitor_lock->unlock() ;
SimpleExit (Self) ;
guarantee (_owner != Self, "invariant") ;
int ret = OS_OK ;
if (millis <= 0) {
Self->_ParkEvent->park();
} else {
ret = Self->_ParkEvent->park(millis);
}
// If thread still resides on the waitset then unlink it.
// Double-checked locking -- the usage is safe in this context
// as TState is volatile and the lock-unlock operators are
// serializing (barrier-equivalent).
if (Node.TState == ObjectWaiter::TS_WAIT) {
RawMonitor_lock->lock_without_safepoint_check() ;
if (Node.TState == ObjectWaiter::TS_WAIT) {
// Simple O(n) unlink, but performance isn't critical here.
ObjectWaiter * p ;
ObjectWaiter * q = NULL ;
for (p = _WaitSet ; p != &Node; p = p->_next) {
q = p ;
}
guarantee (p == &Node, "invariant") ;
if (q == NULL) {
guarantee (p == _WaitSet, "invariant") ;
_WaitSet = p->_next ;
} else {
guarantee (p == q->_next, "invariant") ;
q->_next = p->_next ;
}
Node.TState = ObjectWaiter::TS_RUN ;
}
RawMonitor_lock->unlock() ;
}
guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
SimpleEnter (Self) ;
guarantee (_owner == Self, "invariant") ;
guarantee (_recursions == 0, "invariant") ;
return ret ;
}
int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
guarantee (_owner == Self, "invariant") ;
if (_WaitSet == NULL) return OS_OK ;
// We have two options:
// A. Transfer the threads from the WaitSet to the EntryList
// B. Remove the thread from the WaitSet and unpark() it.
//
// We use (B), which is crude and results in lots of futile
// context switching. In particular (B) induces lots of contention.
ParkEvent * ev = NULL ; // consider using a small auto array ...
RawMonitor_lock->lock_without_safepoint_check() ;
for (;;) {
ObjectWaiter * w = _WaitSet ;
if (w == NULL) break ;
_WaitSet = w->_next ;
if (ev != NULL) { ev->unpark(); ev = NULL; }
ev = w->_event ;
OrderAccess::loadstore() ;
w->TState = ObjectWaiter::TS_RUN ;
OrderAccess::storeload();
if (!All) break ;
}
RawMonitor_lock->unlock() ;
if (ev != NULL) ev->unpark();
return OS_OK ;
}
// Any JavaThread will enter here with state _thread_blocked
int JvmtiRawMonitor::raw_enter(TRAPS) {
TEVENT (raw_enter) ;
void * Contended ;
// don't enter raw monitor if thread is being externally suspended, it will
// surprise the suspender if a "suspended" thread can still enter monitor
JavaThread * jt = (JavaThread *)THREAD;
if (THREAD->is_Java_thread()) {
jt->SR_lock()->lock_without_safepoint_check();
while (jt->is_external_suspend()) {
jt->SR_lock()->unlock();
jt->java_suspend_self();
jt->SR_lock()->lock_without_safepoint_check();
}
// guarded by SR_lock to avoid racing with new external suspend requests.
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
jt->SR_lock()->unlock();
} else {
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
}
if (Contended == THREAD) {
_recursions ++ ;
return OM_OK ;
}
if (Contended == NULL) {
guarantee (_owner == THREAD, "invariant") ;
guarantee (_recursions == 0, "invariant") ;
return OM_OK ;
}
THREAD->set_current_pending_monitor(this);
if (!THREAD->is_Java_thread()) {
// No other non-Java threads besides VM thread would acquire
// a raw monitor.
assert(THREAD->is_VM_thread(), "must be VM thread");
SimpleEnter (THREAD) ;
} else {
guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
for (;;) {
jt->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or
// java_suspend_self()
SimpleEnter (THREAD) ;
// were we externally suspended while we were waiting?
if (!jt->handle_special_suspend_equivalent_condition()) break ;
// This thread was externally suspended
//
// This logic isn't needed for JVMTI raw monitors,
// but doesn't hurt just in case the suspend rules change. This
// logic is needed for the JvmtiRawMonitor.wait() reentry phase.
// We have reentered the contended monitor, but while we were
// waiting another thread suspended us. We don't want to reenter
// the monitor while suspended because that would surprise the
// thread that suspended us.
//
// Drop the lock -
SimpleExit (THREAD) ;
jt->java_suspend_self();
}
assert(_owner == THREAD, "Fatal error with monitor owner!");
assert(_recursions == 0, "Fatal error with monitor recursions!");
}
THREAD->set_current_pending_monitor(NULL);
guarantee (_recursions == 0, "invariant") ;
return OM_OK;
}
// Used mainly for JVMTI raw monitor implementation
// Also used for JvmtiRawMonitor::wait().
int JvmtiRawMonitor::raw_exit(TRAPS) {
TEVENT (raw_exit) ;
if (THREAD != _owner) {
return OM_ILLEGAL_MONITOR_STATE;
}
if (_recursions > 0) {
--_recursions ;
return OM_OK ;
}
void * List = _EntryList ;
SimpleExit (THREAD) ;
return OM_OK;
}
// Used for JVMTI raw monitor implementation.
// All JavaThreads will enter here with state _thread_blocked
int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
TEVENT (raw_wait) ;
if (THREAD != _owner) {
return OM_ILLEGAL_MONITOR_STATE;
}
// To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
// The caller must be able to tolerate spurious returns from raw_wait().
THREAD->_ParkEvent->reset() ;
OrderAccess::fence() ;
// check interrupt event
if (interruptible && Thread::is_interrupted(THREAD, true)) {
return OM_INTERRUPTED;
}
intptr_t save = _recursions ;
_recursions = 0 ;
_waiters ++ ;
if (THREAD->is_Java_thread()) {
guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
((JavaThread *)THREAD)->set_suspend_equivalent();
}
int rv = SimpleWait (THREAD, millis) ;
_recursions = save ;
_waiters -- ;
guarantee (THREAD == _owner, "invariant") ;
if (THREAD->is_Java_thread()) {
JavaThread * jSelf = (JavaThread *) THREAD ;
for (;;) {
if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
SimpleExit (THREAD) ;
jSelf->java_suspend_self();
SimpleEnter (THREAD) ;
jSelf->set_suspend_equivalent() ;
}
}
guarantee (THREAD == _owner, "invariant") ;
if (interruptible && Thread::is_interrupted(THREAD, true)) {
return OM_INTERRUPTED;
}
return OM_OK ;
}
int JvmtiRawMonitor::raw_notify(TRAPS) {
TEVENT (raw_notify) ;
if (THREAD != _owner) {
return OM_ILLEGAL_MONITOR_STATE;
}
SimpleNotify (THREAD, false) ;
return OM_OK;
}
int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
TEVENT (raw_notifyAll) ;
if (THREAD != _owner) {
return OM_ILLEGAL_MONITOR_STATE;
}
SimpleNotify (THREAD, true) ;
return OM_OK;
}
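The enter/exit protocol above boils down to a CAS on the _owner field for the uncontended case, with contended threads queued under RawMonitor_lock and blocked on their ParkEvent. The following is a minimal standalone sketch of that idiom, not HotSpot code; std::atomic, std::mutex and std::condition_variable stand in for Atomic::cmpxchg_ptr, RawMonitor_lock and the per-thread ParkEvent, and recursion handling is omitted.
// Illustrative sketch only -- std:: primitives stand in for HotSpot's
// Atomic::cmpxchg_ptr, RawMonitor_lock and ParkEvent.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
class ToyRawMonitor {
  std::atomic<std::thread::id> _owner{};   // analogous to ObjectMonitor::_owner
  std::mutex _list_lock;                   // analogous to RawMonitor_lock
  std::condition_variable _cv;             // crude stand-in for ParkEvent park/unpark
public:
  void enter() {
    std::thread::id none{}, self = std::this_thread::get_id();
    // Fast path: try to claim ownership with a single CAS, as SimpleEnter does.
    if (_owner.compare_exchange_strong(none, self)) return;
    // Slow path: block until the owner field can be claimed.
    std::unique_lock<std::mutex> lk(_list_lock);
    _cv.wait(lk, [&] {
      std::thread::id expected{};
      return _owner.compare_exchange_strong(expected, self);
    });
  }
  void exit() {
    {
      std::lock_guard<std::mutex> lk(_list_lock);
      _owner.store(std::thread::id{});     // release ownership, as SimpleExit does
    }
    _cv.notify_one();                      // wake one blocked contender
  }
};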
/*
* Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...@@ -22,3 +21,79 @@
* questions.
*
*/
//
// class JvmtiRawMonitor
//
// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
//
// Wrapper for ObjectMonitor class that saves the Monitor's name
//
class JvmtiRawMonitor : public ObjectMonitor {
private:
int _magic;
char * _name;
// JVMTI_RM_MAGIC is set in constructor and unset in destructor.
enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
int SimpleEnter (Thread * Self) ;
int SimpleExit (Thread * Self) ;
int SimpleWait (Thread * Self, jlong millis) ;
int SimpleNotify (Thread * Self, bool All) ;
public:
JvmtiRawMonitor(const char *name);
~JvmtiRawMonitor();
int raw_enter(TRAPS);
int raw_exit(TRAPS);
int raw_wait(jlong millis, bool interruptable, TRAPS);
int raw_notify(TRAPS);
int raw_notifyAll(TRAPS);
int magic() { return _magic; }
const char *get_name() { return _name; }
bool is_valid();
};
// Onload pending raw monitors
// Class is used to cache onload or onstart monitor enter
// which will transition into real monitor when
// VM is fully initialized.
class JvmtiPendingMonitors : public AllStatic {
private:
static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
static void dispose() {
delete monitors();
}
public:
static void enter(JvmtiRawMonitor *monitor) {
monitors()->append(monitor);
}
static int count() {
return monitors()->length();
}
static void destroy(JvmtiRawMonitor *monitor) {
while (monitors()->contains(monitor)) {
monitors()->remove(monitor);
}
}
// Return false if monitor is not found in the list.
static bool exit(JvmtiRawMonitor *monitor) {
if (monitors()->contains(monitor)) {
monitors()->remove(monitor);
return true;
} else {
return false;
}
}
static void transition_raw_monitors();
};
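As a usage illustration (hypothetical agent code, not part of this change): a raw monitor entered during the OnLoad phase is only recorded in JvmtiPendingMonitors; monitors still held when the VM finishes initializing are replayed by transition_raw_monitors(), while a matching exit during OnLoad simply removes the cached entry.
// Hypothetical JVMTI agent sketch showing when the pending-monitor cache comes into play.
#include <jvmti.h>
static jrawMonitorID g_agent_lock;   // hypothetical agent-global raw monitor
JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* /*options*/, void* /*reserved*/) {
  jvmtiEnv* jvmti = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_0) != JNI_OK) {
    return JNI_ERR;
  }
  jvmti->CreateRawMonitor("agent-state-lock", &g_agent_lock);
  // In the OnLoad phase this enter is cached via JvmtiPendingMonitors::enter().
  jvmti->RawMonitorEnter(g_agent_lock);
  // ... one-time agent setup ...
  // The matching exit removes the monitor from the pending list again; had the
  // agent kept it locked, transition_raw_monitors() would re-enter it for real
  // once the VM is fully initialized.
  jvmti->RawMonitorExit(g_agent_lock);
  return JNI_OK;
}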
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_basicLock.cpp.incl"
void BasicLock::print_on(outputStream* st) const {
st->print("monitor");
}
void BasicLock::move_to(oop obj, BasicLock* dest) {
// Check to see if we need to inflate the lock. This is only needed
// if an object is locked using "this" lightweight monitor. In that
// case, the displaced_header() is unlocked, because the
// displaced_header() contains the header for the originally unlocked
// object. However the object could have already been inflated. But it
// does not matter, the inflation will just be a no-op. For other cases,
// the displaced header will be either 0x0 or 0x3, which are location
// independent, therefore the BasicLock is free to move.
//
// During OSR we may need to relocate a BasicLock (which contains a
// displaced word) from a location in an interpreter frame to a
// new location in a compiled frame. "this" refers to the source
// basiclock in the interpreter frame. "dest" refers to the destination
// basiclock in the new compiled frame. We *always* inflate in move_to().
// The always-Inflate policy works properly, but in 1.5.0 it can sometimes
// cause performance problems in code that makes heavy use of a small # of
// uncontended locks. (We'd inflate during OSR, and then sync performance
// would subsequently plummet because the thread would be forced thru the slow-path).
// This problem has been made largely moot on IA32 by inlining the inflated fast-path
// operations in Fast_Lock and Fast_Unlock in i486.ad.
//
// Note that there is a way to safely swing the object's markword from
// one stack location to another. This avoids inflation. Obviously,
// we need to ensure that both locations refer to the current thread's stack.
// There are some subtle concurrency issues, however, and since the benefit is
// small (given the support for inflated fast-path locking in the fast_lock, etc)
// we'll leave that optimization for another time.
if (displaced_header()->is_neutral()) {
ObjectSynchronizer::inflate_helper(obj);
// WARNING: We can not put check here, because the inflation
// will not update the displaced header. Once BasicLock is inflated,
// no one should ever look at its content.
} else {
// Typically the displaced header will be 0 (recursive stack lock) or
// unused_mark. Naively we'd like to assert that the displaced mark
// value is either 0, neutral, or 3. But with the advent of the
// store-before-CAS avoidance in fast_lock/compiler_lock_object
// we can find any flavor mark in the displaced mark.
}
// [RGV] The next line appears to do nothing!
intptr_t dh = (intptr_t) displaced_header();
dest->set_displaced_header(displaced_header());
}
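A compact way to read the discussion above: move_to() only cares which of three shapes the displaced header currently has. The sketch below is illustrative only, with hypothetical constants standing in for the real markOop encodings.
// Illustrative sketch, not HotSpot code: the three displaced-header cases
// move_to() distinguishes. 0x0 and 0x3 are used here as stand-ins for the
// "recursive stack lock" and "unused mark" values mentioned in the comments.
#include <cstdint>
enum class DisplacedKind {
  Recursive,   // 0x0: recursive stack lock, location independent, safe to move
  UnusedMark,  // 0x3: unused mark from the store-before-CAS avoidance path, also movable
  Neutral      // anything else: header of the originally unlocked object -> inflate first
};
DisplacedKind classify_displaced(std::intptr_t displaced_header) {
  if (displaced_header == 0x0) return DisplacedKind::Recursive;
  if (displaced_header == 0x3) return DisplacedKind::UnusedMark;
  return DisplacedKind::Neutral;
}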
/*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...@@ -22,4 +22,51 @@
*
*/
class BasicLock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
volatile markOop _displaced_header;
public:
markOop displaced_header() const { return _displaced_header; }
void set_displaced_header(markOop header) { _displaced_header = header; }
void print_on(outputStream* st) const;
// move a basic lock (used during deoptimization)
void move_to(oop obj, BasicLock* dest);
static int displaced_header_offset_in_bytes() { return offset_of(BasicLock, _displaced_header); }
};
// A BasicObjectLock associates a specific Java object with a BasicLock.
// It is currently embedded in an interpreter frame.
// Because some machines have alignment restrictions on the control stack,
// the actual space allocated by the interpreter may include padding words
// after the end of the BasicObjectLock. Also, in order to guarantee
// alignment of the embedded BasicLock objects on such machines, we
// put the embedded BasicLock at the beginning of the struct.
class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
BasicLock _lock; // the lock, must be double word aligned
oop _obj; // object holds the lock;
public:
// Manipulation
oop obj() const { return _obj; }
void set_obj(oop obj) { _obj = obj; }
BasicLock* lock() { return &_lock; }
// Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
// in interpreter activation frames since it includes machine-specific padding.
static int size() { return sizeof(BasicObjectLock)/wordSize; }
// GC support
void oops_do(OopClosure* f) { f->do_oop(&_obj); }
static int obj_offset_in_bytes() { return offset_of(BasicObjectLock, _obj); }
static int lock_offset_in_bytes() { return offset_of(BasicObjectLock, _lock); }
};
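The byte-offset accessors above exist so that generated code and runtime stubs can address the two fields of a monitor slot directly. A hedged sketch of that addressing, assuming the BasicLock and BasicObjectLock declarations above; oop is shown as a plain void* for illustration.
// Sketch only: compute field addresses of one BasicObjectLock slot in an
// interpreter frame from the offsets declared above.
void* object_of_slot(char* slot) {
  return *reinterpret_cast<void**>(slot + BasicObjectLock::obj_offset_in_bytes());
}
intptr_t displaced_header_of_slot(char* slot) {
  char* lock = slot + BasicObjectLock::lock_offset_in_bytes();
  return *reinterpret_cast<intptr_t*>(lock + BasicLock::displaced_header_offset_in_bytes());
}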
...@@ -265,48 +265,3 @@ class Mutex : public Monitor { // degenerate Monitor
}
};
/*
* Per-thread blocking support for JSR166. See the Java-level
* Documentation for rationale. Basically, park acts like wait, unpark
* like notify.
*
* 6271289 --
* To avoid errors where an os thread expires but the JavaThread still
* exists, Parkers are immortal (type-stable) and are recycled across
* new threads. This parallels the ParkEvent implementation.
* Because park-unpark allow spurious wakeups it is harmless if an
* unpark call unparks a new thread using the old Parker reference.
*
* In the future we'll want to think about eliminating Parker and using
* ParkEvent instead. There's considerable duplication between the two
* services.
*
*/
class Parker : public os::PlatformParker {
private:
volatile int _counter ;
Parker * FreeNext ;
JavaThread * AssociatedWith ; // Current association
public:
Parker() : PlatformParker() {
_counter = 0 ;
FreeNext = NULL ;
AssociatedWith = NULL ;
}
protected:
~Parker() { ShouldNotReachHere(); }
public:
// For simplicity of interface with Java, all forms of park (indefinite,
// relative, and absolute) are multiplexed into one call.
void park(bool isAbsolute, jlong time);
void unpark();
// Lifecycle operators
static Parker * Allocate (JavaThread * t) ;
static void Release (Parker * e) ;
private:
static Parker * volatile FreeList ;
static volatile int ListLock ;
};
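For orientation, Parker is the VM-side object behind java.util.concurrent.locks.LockSupport: unpark() makes at most one permit available and park() consumes it or blocks. A minimal standalone sketch of that one-permit behaviour follows; it is not the HotSpot implementation, which builds on the platform PlatformParker and also supports timed and absolute parks.
// Illustrative sketch of LockSupport-style permit semantics; not HotSpot code.
#include <condition_variable>
#include <mutex>
class ToyParker {
  int _counter = 0;                 // 0 or 1, like Parker::_counter
  std::mutex _m;
  std::condition_variable _cv;
public:
  void park() {                     // indefinite park; the real Parker also takes a timeout
    std::unique_lock<std::mutex> lk(_m);
    _cv.wait(lk, [&] { return _counter > 0; });
    _counter = 0;                   // consume the permit
  }
  void unpark() {
    { std::lock_guard<std::mutex> lk(_m); _counter = 1; }
    _cv.notify_one();
  }
};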
This diff is collapsed.
...@@ -22,6 +22,32 @@
*
*/
// ObjectWaiter serves as a "proxy" or surrogate thread.
// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
// ParkEvent instead. Beware, however, that the JVMTI code
// knows about ObjectWaiters, so we'll have to reconcile that code.
// See next_waiter(), first_waiter(), etc.
class ObjectWaiter : public StackObj {
public:
enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
enum Sorted { PREPEND, APPEND, SORTED } ;
ObjectWaiter * volatile _next;
ObjectWaiter * volatile _prev;
Thread* _thread;
ParkEvent * _event;
volatile int _notified ;
volatile TStates TState ;
Sorted _Sorted ; // List placement disposition
bool _active ; // Contention monitoring is enabled
public:
ObjectWaiter(Thread* thread);
void wait_reenter_begin(ObjectMonitor *mon);
void wait_reenter_end(ObjectMonitor *mon);
};
// WARNING:
// This is a very sensitive and fragile class. DO NOT make any
// change unless you are fully aware of the underlying semantics.
...@@ -38,8 +64,6 @@
// It is also used as RawMonitor by the JVMTI
class ObjectWaiter;
class ObjectMonitor {
public:
enum {
...@@ -74,13 +98,16 @@ class ObjectMonitor {
public:
ObjectMonitor();
~ObjectMonitor();
markOop header() const;
void set_header(markOop hdr);
intptr_t is_busy() const;
intptr_t is_busy() const {
// TODO-FIXME: merge _count and _waiters.
// TODO-FIXME: assert _owner == null implies _recursions = 0
// TODO-FIXME: assert _WaitSet != null implies _count > 0
return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
}
intptr_t is_entered(Thread* current) const;
void* owner() const;
...@@ -91,13 +118,58 @@ class ObjectMonitor { ...@@ -91,13 +118,58 @@ class ObjectMonitor {
intptr_t count() const; intptr_t count() const;
void set_count(intptr_t count); void set_count(intptr_t count);
intptr_t contentions() const ; intptr_t contentions() const ;
intptr_t recursions() const { return _recursions; }
// JVM/DI GetMonitorInfo() needs this // JVM/DI GetMonitorInfo() needs this
Thread * thread_of_waiter (ObjectWaiter *) ;
ObjectWaiter * first_waiter () ;
ObjectWaiter * next_waiter(ObjectWaiter* o);
intptr_t recursions() const { return _recursions; }
ObjectWaiter* first_waiter() { return _WaitSet; }
ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
Thread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
// initialize the monitor, except the semaphore; all other fields
// are simple integers or pointers
ObjectMonitor() {
_header = NULL;
_count = 0;
_waiters = 0,
_recursions = 0;
_object = NULL;
_owner = NULL;
_WaitSet = NULL;
_WaitSetLock = 0 ;
_Responsible = NULL ;
_succ = NULL ;
_cxq = NULL ;
FreeNext = NULL ;
_EntryList = NULL ;
_SpinFreq = 0 ;
_SpinClock = 0 ;
OwnerIsThread = 0 ;
}
~ObjectMonitor() {
// TODO: Add asserts ...
// _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
// _count == 0 _EntryList == NULL etc
}
private:
void Recycle () {
// TODO: add stronger asserts ...
// _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
// _count == 0 EntryList == NULL
// _recursions == 0 _WaitSet == NULL
// TODO: assert (is_busy()|_recursions) == 0
_succ = NULL ;
_EntryList = NULL ;
_cxq = NULL ;
_WaitSet = NULL ;
_recursions = 0 ;
_SpinFreq = 0 ;
_SpinClock = 0 ;
OwnerIsThread = 0 ;
}
public:
void* object() const;
void* object_addr();
...@@ -122,22 +194,9 @@ class ObjectMonitor {
intptr_t complete_exit(TRAPS);
void reenter(intptr_t recursions, TRAPS);
int raw_enter(TRAPS);
int raw_exit(TRAPS);
int raw_wait(jlong millis, bool interruptable, TRAPS);
int raw_notify(TRAPS);
int raw_notifyAll(TRAPS);
private:
// JVMTI support -- remove ASAP
int SimpleEnter (Thread * Self) ;
int SimpleExit (Thread * Self) ;
int SimpleWait (Thread * Self, jlong millis) ;
int SimpleNotify (Thread * Self, bool All) ;
private:
void Recycle () ;
void AddWaiter (ObjectWaiter * waiter) ;
static void DeferredInitialize();
ObjectWaiter * DequeueWaiter () ;
void DequeueSpecificWaiter (ObjectWaiter * waiter) ;
...@@ -172,13 +231,17 @@
// The VM assumes write ordering wrt these fields, which can be
// read from other threads.
protected: // protected for jvmtiRawMonitor
void * volatile _owner; // pointer to owning thread OR BasicLock
volatile intptr_t _recursions; // recursion count, 0 for first entry
private:
int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock
ObjectWaiter * volatile _cxq ; // LL of recently-arrived threads blocked on entry.
// The list is actually composed of WaitNodes, acting
// as proxies for Threads.
protected:
ObjectWaiter * volatile _EntryList ; // Threads blocked on entry or reentry.
private:
Thread * volatile _succ ; // Heir presumptive thread - used for futile wakeup throttling
Thread * volatile _Responsible ;
int _PromptDrain ; // rqst to drain cxq into EntryList ASAP
...@@ -196,8 +259,12 @@ class ObjectMonitor {
volatile intptr_t _count; // reference count to prevent reclamation/deflation
// at stop-the-world time. See deflate_idle_monitors().
// _count is approximately |_WaitSet| + |_EntryList|
protected:
volatile intptr_t _waiters; // number of waiting threads
private:
protected:
ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
private:
volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
public:
...@@ -205,4 +272,37 @@
ObjectMonitor * FreeNext ; // Free list linkage
intptr_t StatA, StatsB ;
public:
static void Initialize () ;
static PerfCounter * _sync_ContendedLockAttempts ;
static PerfCounter * _sync_FutileWakeups ;
static PerfCounter * _sync_Parks ;
static PerfCounter * _sync_EmptyNotifications ;
static PerfCounter * _sync_Notifications ;
static PerfCounter * _sync_SlowEnter ;
static PerfCounter * _sync_SlowExit ;
static PerfCounter * _sync_SlowNotify ;
static PerfCounter * _sync_SlowNotifyAll ;
static PerfCounter * _sync_FailedSpins ;
static PerfCounter * _sync_SuccessfulSpins ;
static PerfCounter * _sync_PrivateA ;
static PerfCounter * _sync_PrivateB ;
static PerfCounter * _sync_MonInCirculation ;
static PerfCounter * _sync_MonScavenged ;
static PerfCounter * _sync_Inflations ;
static PerfCounter * _sync_Deflations ;
static PerfLongVariable * _sync_MonExtant ;
public:
static int Knob_Verbose;
static int Knob_SpinLimit;
};
#undef TEVENT
#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
#undef TEVENT
#define TEVENT(nom) {;}
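// Illustrative sketch (not HotSpot code): the FEVENT macro above rate-limits
// its output by printing only when the per-site counter reaches a power of
// two, i.e. when (v & (v-1)) == 0, so a hot event emits O(log n) lines rather
// than n. The standalone snippet below (counter intentionally racy, just like
// the original) demonstrates that gating:
#include <cstdio>

#define SKETCH_EVENT(nom)                                        \
  { static volatile int ctr = 0 ;                                \
    int v = ++ctr ;                                              \
    if ((v & (v - 1)) == 0) {                                    \
      std::printf(#nom " : %d\n", v) ;                           \
    } }

int main() {
  for (int i = 0; i < 1000; i++) {
    SKETCH_EVENT(DemoEvent) ;   // prints at counts 1, 2, 4, ..., 512
  }
  return 0 ;
}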
@@ -104,7 +104,3 @@ inline void ObjectMonitor::set_owner(void* owner) {
_count = 0;
}
// here are the platform-dependent bodies:
# include "incls/_objectMonitor_pd.inline.hpp.incl"
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_park.cpp.incl"
// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a CAS-based
// detach-modify-reattach idiom that avoids the ABA problems
// that would otherwise be present in a simple CAS-based
// push-pop implementation. (push-one and pop-all)
//
// Caveat: Allocate() and Release() may be called from threads
// other than the thread associated with the Event!
// If we need to call Allocate() when running as the thread in
// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park(). unpark() calls to
// an unprovisioned thread would be ignored. The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.
volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;
ParkEvent * ParkEvent::Allocate (Thread * t) {
// In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
ParkEvent * ev ;
// Start by trying to recycle an existing but unassociated
// ParkEvent from the global free list.
for (;;) {
ev = FreeList ;
if (ev == NULL) break ;
// 1: Detach - sequester or privatize the list
// Tantamount to ev = Swap (&FreeList, NULL)
if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
continue ;
}
// We've detached the list. The list in-hand is now
// local to this thread. This thread can operate on the
// list without risk of interference from other threads.
// 2: Extract -- pop the 1st element from the list.
ParkEvent * List = ev->FreeNext ;
if (List == NULL) break ;
for (;;) {
// 3: Try to reattach the residual list
guarantee (List != NULL, "invariant") ;
ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
if (Arv == NULL) break ;
// New nodes arrived. Try to detach the recent arrivals.
if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
continue ;
}
guarantee (Arv != NULL, "invariant") ;
// 4: Merge Arv into List
ParkEvent * Tail = List ;
while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
Tail->FreeNext = Arv ;
}
break ;
}
if (ev != NULL) {
guarantee (ev->AssociatedWith == NULL, "invariant") ;
} else {
// Do this the hard way -- materialize a new ParkEvent.
// In rare cases an allocating thread might detach a long list --
// installing null into FreeList -- and then stall or be obstructed.
// A 2nd thread calling Allocate() would see FreeList == null.
// The list held privately by the 1st thread is unavailable to the 2nd thread.
// In that case the 2nd thread would have to materialize a new ParkEvent,
// even though free ParkEvents existed in the system. In this case we end up
// with more ParkEvents in circulation than we need, but the race is
// rare and the outcome is benign. Ideally, the # of extant ParkEvents
// is equal to the maximum # of threads that existed at any one time.
// Because of the race mentioned above, segments of the freelist
// can be transiently inaccessible. At worst we may end up with the
// # of ParkEvents in circulation slightly above the ideal.
// Note that if we didn't have the TSM/immortal constraint, then
// when reattaching, above, we could trim the list.
ev = new ParkEvent () ;
guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
}
ev->reset() ; // courtesy to caller
ev->AssociatedWith = t ; // Associate ev with t
ev->FreeNext = NULL ;
return ev ;
}
void ParkEvent::Release (ParkEvent * ev) {
if (ev == NULL) return ;
guarantee (ev->FreeNext == NULL , "invariant") ;
ev->AssociatedWith = NULL ;
for (;;) {
// Push ev onto FreeList
// The mechanism is "half" lock-free.
ParkEvent * List = FreeList ;
ev->FreeNext = List ;
if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
}
}
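// Illustrative sketch (not HotSpot code): the Allocate()/Release() pair above
// avoids ABA hazards by never popping a single node with a bare CAS; the
// allocating thread detaches the entire FreeList in one exchange (making it
// thread-private) and later reattaches the residue. The standalone model
// below (std::atomic and a toy Node type are assumptions, not HotSpot types)
// shows the two primitives that idiom is built from: push-one and pop-all.
#include <atomic>

struct Node { Node * next ; } ;

static std::atomic<Node *> gFreeListModel { nullptr } ;

// Analogous to Release(): push a single node onto the shared free list.
void push_one (Node * n) {
  Node * head = gFreeListModel.load() ;
  do {
    n->next = head ;
  } while (!gFreeListModel.compare_exchange_weak(head, n)) ;
}

// Analogous to the "detach" step in Allocate(): privatize the whole list.
// Because the entire list is taken at once, no other thread can observe a
// node being recycled while it still holds a stale next pointer.
Node * pop_all () {
  return gFreeListModel.exchange(nullptr) ;
}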
// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.
void * ParkEvent::operator new (size_t sz) {
return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
}
void ParkEvent::operator delete (void * a) {
// ParkEvents are type-stable and immortal ...
ShouldNotReachHere();
}
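// Illustrative sketch (not HotSpot code): the overridden operator new above
// allocates sz + 256 bytes and rounds the result forward to a 256-byte
// boundary, so the low 8 bits of every ParkEvent address are zero. A quick
// standalone check of that arithmetic (align256 is an assumed helper name):
#include <cassert>
#include <cstdint>

inline void * align256 (void * raw) {
  return (void *) (((intptr_t) raw + 256) & (intptr_t) -256) ;
}

int main() {
  char buf[1024] ;
  void * p = align256(buf) ;
  assert(((intptr_t) p & 0xFF) == 0) ;                   // least significant byte is 0
  assert((char *) p > buf && (char *) p <= buf + 256) ;  // stays within the 256-byte slack
  return 0 ;
}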
// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers. The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.
volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;
Parker * Parker::Allocate (JavaThread * t) {
guarantee (t != NULL, "invariant") ;
Parker * p ;
// Start by trying to recycle an existing but unassociated
// Parker from the global free list.
for (;;) {
p = FreeList ;
if (p == NULL) break ;
// 1: Detach
// Tantamount to p = Swap (&FreeList, NULL)
if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
continue ;
}
// We've detached the list. The list in-hand is now
// local to this thread. This thread can operate on the
// list without risk of interference from other threads.
// 2: Extract -- pop the 1st element from the list.
Parker * List = p->FreeNext ;
if (List == NULL) break ;
for (;;) {
// 3: Try to reattach the residual list
guarantee (List != NULL, "invariant") ;
Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
if (Arv == NULL) break ;
// New nodes arrived. Try to detach the recent arrivals.
if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
continue ;
}
guarantee (Arv != NULL, "invariant") ;
// 4: Merge Arv into List
Parker * Tail = List ;
while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
Tail->FreeNext = Arv ;
}
break ;
}
if (p != NULL) {
guarantee (p->AssociatedWith == NULL, "invariant") ;
} else {
// Do this the hard way -- materialize a new Parker.
// In rare cases an allocating thread might detach
// a long list -- installing null into FreeList -- and
// then stall. Another thread calling Allocate() would see
// FreeList == null and then invoke the ctor. In this case we
// end up with more Parkers in circulation than we need, but
// the race is rare and the outcome is benign.
// Ideally, the # of extant Parkers is equal to the
// maximum # of threads that existed at any one time.
// Because of the race mentioned above, segments of the
// freelist can be transiently inaccessible. At worst
// we may end up with the # of Parkers in circulation
// slightly above the ideal.
p = new Parker() ;
}
p->AssociatedWith = t ; // Associate p with t
p->FreeNext = NULL ;
return p ;
}
void Parker::Release (Parker * p) {
if (p == NULL) return ;
guarantee (p->AssociatedWith != NULL, "invariant") ;
guarantee (p->FreeNext == NULL , "invariant") ;
p->AssociatedWith = NULL ;
for (;;) {
// Push p onto FreeList
Parker * List = FreeList ;
p->FreeNext = List ;
if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
}
}
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* Per-thread blocking support for JSR166. See the Java-level
* Documentation for rationale. Basically, park acts like wait, unpark
* like notify.
*
* 6271289 --
* To avoid errors where an os thread expires but the JavaThread still
* exists, Parkers are immortal (type-stable) and are recycled across
* new threads. This parallels the ParkEvent implementation.
* Because park-unpark allow spurious wakeups it is harmless if an
* unpark call unparks a new thread using the old Parker reference.
*
* In the future we'll want to think about eliminating Parker and using
* ParkEvent instead. There's considerable duplication between the two
* services.
*
*/
class Parker : public os::PlatformParker {
private:
volatile int _counter ;
Parker * FreeNext ;
JavaThread * AssociatedWith ; // Current association
public:
Parker() : PlatformParker() {
_counter = 0 ;
FreeNext = NULL ;
AssociatedWith = NULL ;
}
protected:
~Parker() { ShouldNotReachHere(); }
public:
// For simplicity of interface with Java, all forms of park (indefinite,
// relative, and absolute) are multiplexed into one call.
void park(bool isAbsolute, jlong time);
void unpark();
// Lifecycle operators
static Parker * Allocate (JavaThread * t) ;
static void Release (Parker * e) ;
private:
static Parker * volatile FreeList ;
static volatile int ListLock ;
};
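// Illustrative sketch of how the Parker interface declared above is meant to
// be used (VM-internal context assumed; the helper functions and the
// nanosecond unit for relative waits are assumptions, not part of this
// changeset). A waiter performs a relative, timed park; spurious returns are
// allowed, so real callers re-check their condition afterwards.
void example_parker_wait (Parker * p) {
  p->park(false /* isAbsolute */, 10 * 1000 * 1000 /* assumed: relative nanoseconds */) ;
}

// A waker publishes a permit; if the associated thread is parked it wakes up,
// otherwise its next park() returns immediately.
void example_parker_wake (Parker * p) {
  p->unpark() ;
}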
/////////////////////////////////////////////////////////////
//
// ParkEvents are type-stable and immortal.
//
// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
// associated with the thread for the thread's entire lifetime - the relationship is
// stable. A thread will be associated with at most one ParkEvent. When the thread
// expires, the ParkEvent moves to the EventFreeList. New threads attempt to allocate from
// the EventFreeList before creating a new Event. Type-stability frees us from
// worrying about stale Event or Thread references in the objectMonitor subsystem.
// (A reference to ParkEvent is always valid, even though the event may no longer be associated
// with the desired or expected thread. A key aspect of this design is that the callers of
// park, unpark, etc must tolerate stale references and spurious wakeups).
//
// Only the "associated" thread can block (park) on the ParkEvent, although
// any other thread can unpark a reachable ParkEvent. Park() is allowed to
// return spuriously. In fact park-unpark is really just an optimization to
// avoid unbounded spinning and to surrender the CPU like a polite system citizen.
// A degenerate albeit "impolite" park-unpark implementation could simply return.
// See http://blogs.sun.com/dave for more details.
//
// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
// thread proxies, and simply make the THREAD structure type-stable and persistent.
// Currently, we unpark events associated with threads, but ideally we'd just
// unpark threads.
//
// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
// platform-independent. PlatformEvent provides park(), unpark(), etc., and
// is abstract -- that is, a PlatformEvent should never be instantiated except
// as part of a ParkEvent.
// Equivalently we could have defined a platform-independent base-class that
// exported Allocate(), Release(), etc. The platform-specific class would extend
// that base-class, adding park(), unpark(), etc.
//
// A word of caution: The JVM uses 2 very similar constructs:
// 1. ParkEvents are used for Java-level "monitor" synchronization.
// 2. Parkers are used by JSR166-JUC park-unpark.
//
// We'll want to eventually merge these redundant facilities and use ParkEvent.
class ParkEvent : public os::PlatformEvent {
private:
ParkEvent * FreeNext ;
// Current association
Thread * AssociatedWith ;
intptr_t RawThreadIdentity ; // LWPID etc
volatile int Incarnation ;
// diagnostic : keep track of last thread to wake this thread.
// this is useful for construction of dependency graphs.
void * LastWaker ;
public:
// MCS-CLH list linkage and Native Mutex/Monitor
ParkEvent * volatile ListNext ;
ParkEvent * volatile ListPrev ;
volatile intptr_t OnList ;
volatile int TState ;
volatile int Notified ; // for native monitor construct
volatile int IsWaiting ; // Enqueued on WaitSet
private:
static ParkEvent * volatile FreeList ;
static volatile int ListLock ;
// It's prudent to mark the dtor as "private"
// ensuring that it's not visible outside the package.
// Unfortunately gcc warns about such usage, so
// we revert to the less desirable "protected" visibility.
// The other compilers accept private dtors.
protected: // Ensure dtor is never invoked
~ParkEvent() { guarantee (0, "invariant") ; }
ParkEvent() : PlatformEvent() {
AssociatedWith = NULL ;
FreeNext = NULL ;
ListNext = NULL ;
ListPrev = NULL ;
OnList = 0 ;
TState = 0 ;
Notified = 0 ;
IsWaiting = 0 ;
}
// We use placement-new to force ParkEvent instances to be
// aligned on 256-byte address boundaries. This ensures that the least
// significant byte of a ParkEvent address is always 0.
void * operator new (size_t sz) ;
void operator delete (void * a) ;
public:
static ParkEvent * Allocate (Thread * t) ;
static void Release (ParkEvent * e) ;
} ;
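// Illustrative sketch of the caller discipline the comments above require:
// because ParkEvent::park() may return spuriously and unpark() may hit a
// stale association, waiters re-test their predicate in a loop and wakers
// publish the predicate before unparking. (VM-internal context assumed;
// _demo_flag and the two helpers are illustrative names only, and real code
// would use OrderAccess rather than a bare volatile store.)
static volatile int _demo_flag = 0 ;

void demo_wait_for_flag (ParkEvent * ev) {
  while (_demo_flag == 0) {
    ev->park() ;        // may wake early; the loop re-checks the flag
  }
}

void demo_set_flag (ParkEvent * ev) {
  _demo_flag = 1 ;      // publish the condition first
  ev->unpark() ;        // then wake the (possibly stale) waiter
}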
@@ -22,53 +22,6 @@
*
*/
class BasicLock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
volatile markOop _displaced_header;
public:
markOop displaced_header() const { return _displaced_header; }
void set_displaced_header(markOop header) { _displaced_header = header; }
void print_on(outputStream* st) const;
// move a basic lock (used during deoptimization)
void move_to(oop obj, BasicLock* dest);
static int displaced_header_offset_in_bytes() { return offset_of(BasicLock, _displaced_header); }
};
// A BasicObjectLock associates a specific Java object with a BasicLock.
// It is currently embedded in an interpreter frame.
// Because some machines have alignment restrictions on the control stack,
// the actual space allocated by the interpreter may include padding words
// after the end of the BasicObjectLock. Also, in order to guarantee
// alignment of the embedded BasicLock objects on such machines, we
// put the embedded BasicLock at the beginning of the struct.
class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
BasicLock _lock; // the lock, must be double word aligned
oop _obj; // object holds the lock;
public:
// Manipulation
oop obj() const { return _obj; }
void set_obj(oop obj) { _obj = obj; }
BasicLock* lock() { return &_lock; }
// Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
// in interpreter activation frames since it includes machine-specific padding.
static int size() { return sizeof(BasicObjectLock)/wordSize; }
// GC support
void oops_do(OopClosure* f) { f->do_oop(&_obj); }
static int obj_offset_in_bytes() { return offset_of(BasicObjectLock, _obj); }
static int lock_offset_in_bytes() { return offset_of(BasicObjectLock, _lock); }
};
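// Illustrative sketch: a BasicLock is the on-stack "lock record" of HotSpot's
// lightweight locking -- the object's header word is displaced into the
// BasicLock and a pointer to the BasicLock is CAS-ed into the object header.
// The standalone toy model below illustrates only that displaced-header idea
// (one-word header, no recursion, no inflation, no safepoints -- all
// simplifying assumptions; this is not HotSpot's actual algorithm).
#include <atomic>
#include <cstdint>

struct ToyObject {
  std::atomic<intptr_t> header ;   // unlocked: mark word; locked: pointer to ToyLockRecord
} ;

struct ToyLockRecord {             // plays the role of an on-stack BasicLock
  intptr_t displaced_header ;
  ToyObject * obj ;
} ;

bool toy_fast_lock (ToyObject * o, ToyLockRecord * rec) {
  intptr_t mark = o->header.load() ;
  rec->displaced_header = mark ;   // save the original header on the stack
  rec->obj = o ;
  // Install a pointer to the stack record as the new header. A real VM first
  // checks that the mark is "neutral" and inflates the lock on contention.
  return o->header.compare_exchange_strong(mark, (intptr_t) rec) ;
}

void toy_fast_unlock (ToyLockRecord * rec) {
  intptr_t expected = (intptr_t) rec ;
  // Restore the displaced header; a real VM would fall back to a slow path
  // if this CAS fails (the lock was inflated in the meantime).
  rec->obj->header.compare_exchange_strong(expected, rec->displaced_header) ;
}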
class ObjectMonitor;
@@ -163,6 +116,8 @@ class ObjectSynchronizer : AllStatic {
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
private:
enum { _BLOCKSIZE = 128 };
static ObjectMonitor* gBlockList;
@@ -170,30 +125,6 @@ class ObjectSynchronizer : AllStatic {
static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
static int gOmInUseCount;
public:
static void Initialize () ;
static PerfCounter * _sync_ContendedLockAttempts ;
static PerfCounter * _sync_FutileWakeups ;
static PerfCounter * _sync_Parks ;
static PerfCounter * _sync_EmptyNotifications ;
static PerfCounter * _sync_Notifications ;
static PerfCounter * _sync_SlowEnter ;
static PerfCounter * _sync_SlowExit ;
static PerfCounter * _sync_SlowNotify ;
static PerfCounter * _sync_SlowNotifyAll ;
static PerfCounter * _sync_FailedSpins ;
static PerfCounter * _sync_SuccessfulSpins ;
static PerfCounter * _sync_PrivateA ;
static PerfCounter * _sync_PrivateB ;
static PerfCounter * _sync_MonInCirculation ;
static PerfCounter * _sync_MonScavenged ;
static PerfCounter * _sync_Inflations ;
static PerfCounter * _sync_Deflations ;
static PerfLongVariable * _sync_MonExtant ;
public:
static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
};
// ObjectLocker enforces balanced locking and can never throw an
......
@@ -2995,8 +2995,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// crash Linux VM, see notes in os_linux.cpp.
main_thread->create_stack_guard_pages();
// Initialize Java-Leve synchronization subsystem // Initialize Java-Level synchronization subsystem
ObjectSynchronizer::Initialize() ; ObjectMonitor::Initialize() ;
// Initialize global modules
jint status = init_globals();
@@ -3965,215 +3965,272 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int b
}
}
// Ad-hoc mutual exclusion primitives: SpinLock and Mux
//
// We employ SpinLocks _only for low-contention, fixed-length
// short-duration critical sections where we're concerned
// about native mutex_t or HotSpot Mutex:: latency.
// The mux construct provides a spin-then-block mutual exclusion
// mechanism.
//
// Testing has shown that contention on the ListLock guarding gFreeList
// is common. If we implement ListLock as a simple SpinLock it's common
// for the JVM to devolve to yielding with little progress. This is true
// despite the fact that the critical sections protected by ListLock are
// extremely short.
//
// TODO-FIXME: ListLock should be of type SpinLock.
// We should make this a 1st-class type, integrated into the lock
// hierarchy as leaf-locks. Critically, the SpinLock structure
// should have sufficient padding to avoid false-sharing and excessive
// cache-coherency traffic.
typedef volatile int SpinLockT ;
void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
if (Atomic::cmpxchg (1, adr, 0) == 0) {
return ; // normal fast-path return
}
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
TEVENT (SpinAcquire - ctx) ;
int ctr = 0 ;
int Yields = 0 ;
for (;;) {
while (*adr != 0) {
++ctr ;
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
// Consider using a simple NakedSleep() instead.
// Then SpinAcquire could be called by non-JVM threads
Thread::current()->_ParkEvent->park(1) ;
} else {
os::NakedYield() ;
++Yields ;
}
} else {
SpinPause() ;
}
}
if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
}
}
void Thread::SpinRelease (volatile int * adr) {
assert (*adr != 0, "invariant") ;
OrderAccess::fence() ; // guarantee at least release consistency.
// Roach-motel semantics.
// It's safe if subsequent LDs and STs float "up" into the critical section,
// but prior LDs and STs within the critical section can't be allowed
// to reorder or float past the ST that releases the lock.
*adr = 0 ;
}
// muxAcquire and muxRelease:
//
// * muxAcquire and muxRelease support a single-word lock-word construct.
// The LSB of the word is set IFF the lock is held.
// The remainder of the word points to the head of a singly-linked list
// of threads blocked on the lock.
//
// * The current implementation of muxAcquire-muxRelease uses its own
// dedicated Thread._MuxEvent instance. If we're interested in
// minimizing the peak number of extant ParkEvent instances then
// we could eliminate _MuxEvent and "borrow" _ParkEvent as long
// as certain invariants were satisfied. Specifically, care would need
// to be taken with regards to consuming unpark() "permits".
// A safe rule of thumb is that a thread would never call muxAcquire()
// if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
// park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
// consume an unpark() permit intended for monitorenter, for instance.
// One way around this would be to widen the restricted-range semaphore
// implemented in park(). Another alternative would be to provide
// multiple instances of the PlatformEvent() for each thread. One
// instance would be dedicated to muxAcquire-muxRelease, for instance.
//
// * Usage:
// -- Only as leaf locks
// -- for short-term locking only as muxAcquire does not perform
// thread state transitions.
//
// Alternatives:
// * We could implement muxAcquire and muxRelease with MCS or CLH locks
// but with parking or spin-then-park instead of pure spinning.
// * Use Taura-Oyama-Yonenzawa locks.
// * It's possible to construct a 1-0 lock if we encode the lockword as
// (List,LockByte). Acquire will CAS the full lockword while Release
// will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
// acquiring threads use timers (ParkTimed) to detect and recover from
// the stranding window. Thread/Node structures must be aligned on 256-byte
// boundaries by using placement-new.
// * Augment MCS with advisory back-link fields maintained with CAS().
// Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
// The validity of the backlinks must be ratified before we trust the value.
// If the backlinks are invalid the exiting thread must back-track through the
// the forward links, which are always trustworthy.
// * Add a successor indication. The LockWord is currently encoded as
// (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
// to provide the usual futile-wakeup optimization.
// See RTStt for details.
// * Consider schedctl.sc_nopreempt to cover the critical section.
//
typedef volatile intptr_t MutexT ; // Mux Lock-word
enum MuxBits { LOCKBIT = 1 } ;
void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
if (w == 0) return ;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
}
TEVENT (muxAcquire - Contention) ;
ParkEvent * const Self = Thread::current()->_MuxEvent ;
assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
for (;;) {
int its = (os::is_MP() ? 100 : 0) + 1 ;
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock ;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
}
}
Self->reset() ;
Self->OnList = intptr_t(Lock) ;
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock ;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
Self->OnList = 0 ; // hygiene - allows stronger asserts
return ;
}
continue ; // Interference -- *Lock changed -- Just retry
}
assert (w & LOCKBIT, "invariant") ;
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
}
while (Self->OnList != 0) {
Self->park() ;
}
}
}
void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
if (w == 0) return ;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
}
TEVENT (muxAcquire - Contention) ;
ParkEvent * ReleaseAfter = NULL ;
if (ev == NULL) {
ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
}
assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
for (;;) {
guarantee (ev->OnList == 0, "invariant") ;
int its = (os::is_MP() ? 100 : 0) + 1 ;
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock ;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
if (ReleaseAfter != NULL) {
ParkEvent::Release (ReleaseAfter) ;
}
return ;
}
}
ev->reset() ;
ev->OnList = intptr_t(Lock) ;
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock ;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
ev->OnList = 0 ;
// We call ::Release while holding the outer lock, thus
// artificially lengthening the critical section.
// Consider deferring the ::Release() until the subsequent unlock(),
// after we've dropped the outer lock.
if (ReleaseAfter != NULL) {
ParkEvent::Release (ReleaseAfter) ;
}
return ;
}
continue ; // Interference -- *Lock changed -- Just retry
}
assert (w & LOCKBIT, "invariant") ;
ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
}
while (ev->OnList != 0) {
ev->park() ;
}
}
}
// Release() must extract a successor from the list and then wake that thread.
// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
// similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based
// Release() would :
// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
// (B) Extract a successor from the private list "in-hand"
// (C) attempt to CAS() the residual back into *Lock over null.
// If there were any newly arrived threads and the CAS() would fail.
// In that case Release() would detach the RATs, re-merge the list in-hand
// with the RATs and repeat as needed. Alternately, Release() might
// detach and extract a successor, but then pass the residual list to the wakee.
// The wakee would be responsible for reattaching and remerging before it
// competed for the lock.
//
// Both "pop" and DMR are immune from ABA corruption -- there can be
// multiple concurrent pushers, but only one popper or detacher.
// This implementation pops from the head of the list. This is unfair,
// but tends to provide excellent throughput as hot threads remain hot.
// (We wake recently run threads first).
void Thread::muxRelease (volatile intptr_t * Lock) {
for (;;) {
const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
assert (w & LOCKBIT, "invariant") ;
if (w == LOCKBIT) return ;
ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
assert (List != NULL, "invariant") ;
assert (List->OnList == intptr_t(Lock), "invariant") ;
ParkEvent * nxt = List->ListNext ;
// The following CAS() releases the lock and pops the head element.
if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
continue ;
}
List->OnList = 0 ;
OrderAccess::fence() ;
List->unpark () ;
return ;
}
}
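// Illustrative sketch of the mux lock-word encoding described above: the LSB
// is the "held" bit and the remaining bits are the head of the singly-linked
// list of blocked waiters. The standalone model below (std::atomic and a toy
// Waiter type -- assumptions, not the VM code above) shows only the encode/
// decode helpers and the uncontended acquire/release paths.
#include <atomic>
#include <cstdint>

struct Waiter { Waiter * ListNext ; } ;

static const intptr_t kLockBit = 1 ;
static std::atomic<intptr_t> gLockWordModel { 0 } ;

inline bool     is_held   (intptr_t w) { return (w & kLockBit) != 0 ; }
inline Waiter * list_head (intptr_t w) { return (Waiter *) (w & ~kLockBit) ; }

// Uncontended acquire: succeeds only if the lock bit is clear; any queued
// waiters encoded in the upper bits are preserved by OR-ing the bit in.
bool model_try_acquire () {
  intptr_t w = gLockWordModel.load() ;
  return !is_held(w) && gLockWordModel.compare_exchange_strong(w, w | kLockBit) ;
}

// Uncontended release (the w == LOCKBIT fast path of muxRelease): if nobody
// is queued, simply swing the word back to zero.
bool model_release_uncontended () {
  intptr_t expected = kLockBit ;
  return gLockWordModel.compare_exchange_strong(expected, 0) ;
}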
void Threads::verify() {
ALL_JAVA_THREADS(p) {
p->verify();
......
@@ -30,6 +30,7 @@ class JvmtiGetLoadedClassesClosure;
class ThreadStatistics;
class ConcurrentLocksDump;
class ParkEvent ;
class Parker;
class ciEnv;
class CompileThread;
@@ -544,7 +545,6 @@ public:
static void muxAcquire (volatile intptr_t * Lock, const char * Name) ;
static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
static void muxRelease (volatile intptr_t * Lock) ;
};
// Inline implementation of Thread::current()
@@ -1769,100 +1769,3 @@ public:
}
};
// ParkEvents are type-stable and immortal.
//
// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
// associated with the thread for the thread's entire lifetime - the relationship is
// stable. A thread will be associated with at most one ParkEvent. When the thread
// expires, the ParkEvent moves to the EventFreeList. New threads attempt to allocate from
// the EventFreeList before creating a new Event. Type-stability frees us from
// worrying about stale Event or Thread references in the objectMonitor subsystem.
// (A reference to ParkEvent is always valid, even though the event may no longer be associated
// with the desired or expected thread. A key aspect of this design is that the callers of
// park, unpark, etc must tolerate stale references and spurious wakeups).
//
// Only the "associated" thread can block (park) on the ParkEvent, although
// any other thread can unpark a reachable ParkEvent. Park() is allowed to
// return spuriously. In fact park-unpark is really just an optimization to
// avoid unbounded spinning and to surrender the CPU like a polite system citizen.
// A degenerate albeit "impolite" park-unpark implementation could simply return.
// See http://blogs.sun.com/dave for more details.
//
// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
// thread proxies, and simply make the THREAD structure type-stable and persistent.
// Currently, we unpark events associated with threads, but ideally we'd just
// unpark threads.
//
// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
// platform-independent. PlatformEvent provides park(), unpark(), etc., and
// is abstract -- that is, a PlatformEvent should never be instantiated except
// as part of a ParkEvent.
// Equivalently we could have defined a platform-independent base-class that
// exported Allocate(), Release(), etc. The platform-specific class would extend
// that base-class, adding park(), unpark(), etc.
//
// A word of caution: The JVM uses 2 very similar constructs:
// 1. ParkEvents are used for Java-level "monitor" synchronization.
// 2. Parkers are used by JSR166-JUC park-unpark.
//
// We'll want to eventually merge these redundant facilities and use ParkEvent.
class ParkEvent : public os::PlatformEvent {
private:
ParkEvent * FreeNext ;
// Current association
Thread * AssociatedWith ;
intptr_t RawThreadIdentity ; // LWPID etc
volatile int Incarnation ;
// diagnostic : keep track of last thread to wake this thread.
// this is useful for construction of dependency graphs.
void * LastWaker ;
public:
// MCS-CLH list linkage and Native Mutex/Monitor
ParkEvent * volatile ListNext ;
ParkEvent * volatile ListPrev ;
volatile intptr_t OnList ;
volatile int TState ;
volatile int Notified ; // for native monitor construct
volatile int IsWaiting ; // Enqueued on WaitSet
private:
static ParkEvent * volatile FreeList ;
static volatile int ListLock ;
// It's prudent to mark the dtor as "private"
// ensuring that it's not visible outside the package.
// Unfortunately gcc warns about such usage, so
// we revert to the less desirable "protected" visibility.
// The other compilers accept private dtors.
protected: // Ensure dtor is never invoked
~ParkEvent() { guarantee (0, "invariant") ; }
ParkEvent() : PlatformEvent() {
AssociatedWith = NULL ;
FreeNext = NULL ;
ListNext = NULL ;
ListPrev = NULL ;
OnList = 0 ;
TState = 0 ;
Notified = 0 ;
IsWaiting = 0 ;
}
// We use placement-new to force ParkEvent instances to be
// aligned on 256-byte address boundaries. This ensures that the least
// significant byte of a ParkEvent address is always 0.
void * operator new (size_t sz) ;
void operator delete (void * a) ;
public:
static ParkEvent * Allocate (Thread * t) ;
static void Release (ParkEvent * e) ;
} ;