Commit 8041d7f2 authored by: A amurillo

Merge

/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -49,8 +49,12 @@ public class G1CollectedHeap extends SharedHeap { ...@@ -49,8 +49,12 @@ public class G1CollectedHeap extends SharedHeap {
static private long g1CommittedFieldOffset; static private long g1CommittedFieldOffset;
// size_t _summary_bytes_used; // size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField; static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm // G1MonitoringSupport* _g1mm;
static private AddressField g1mmField; static private AddressField g1mmField;
// MasterOldRegionSet _old_set;
static private long oldSetFieldOffset;
// MasterHumongousRegionSet _humongous_set;
static private long humongousSetFieldOffset;
static { static {
VM.registerVMInitializedObserver(new Observer() { VM.registerVMInitializedObserver(new Observer() {
...@@ -67,12 +71,14 @@ public class G1CollectedHeap extends SharedHeap { ...@@ -67,12 +71,14 @@ public class G1CollectedHeap extends SharedHeap {
g1CommittedFieldOffset = type.getField("_g1_committed").getOffset(); g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used"); summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm"); g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
} }
public long capacity() { public long capacity() {
Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset); Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
MemRegion g1_committed = new MemRegion(g1CommittedAddr); MemRegion g1Committed = new MemRegion(g1CommittedAddr);
return g1_committed.byteSize(); return g1Committed.byteSize();
} }
public long used() { public long used() {
...@@ -94,6 +100,18 @@ public class G1CollectedHeap extends SharedHeap { ...@@ -94,6 +100,18 @@ public class G1CollectedHeap extends SharedHeap {
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr); return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
} }
/** Returns the SA mirror for the heap's _old_set (MasterOldRegionSet) field. */
public HeapRegionSetBase oldSet() {
    // The set is embedded in G1CollectedHeap, so compute its address by offset
    // rather than dereferencing a pointer field.
    Address setAddress = addr.addOffsetTo(oldSetFieldOffset);
    return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class, setAddress);
}
/** Returns the SA mirror for the heap's _humongous_set (MasterHumongousRegionSet) field. */
public HeapRegionSetBase humongousSet() {
    // Embedded struct: address = base of this heap object + field offset.
    Address setAddress = addr.addOffsetTo(humongousSetFieldOffset);
    return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class, setAddress);
}
private Iterator<HeapRegion> heapRegionIterator() { private Iterator<HeapRegion> heapRegionIterator() {
return hrs().heapRegionIterator(); return hrs().heapRegionIterator();
} }
......
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -77,6 +77,10 @@ public class G1MonitoringSupport extends VMObject { ...@@ -77,6 +77,10 @@ public class G1MonitoringSupport extends VMObject {
return edenUsedField.getValue(addr); return edenUsedField.getValue(addr);
} }
/**
 * Number of regions currently backing eden, derived as eden bytes used
 * divided by the fixed G1 region size (HeapRegion.grainBytes()).
 */
public long edenRegionNum() {
    final long regionSize = HeapRegion.grainBytes();
    return edenUsed() / regionSize;
}
public long survivorCommitted() { public long survivorCommitted() {
return survivorCommittedField.getValue(addr); return survivorCommittedField.getValue(addr);
} }
...@@ -85,6 +89,10 @@ public class G1MonitoringSupport extends VMObject { ...@@ -85,6 +89,10 @@ public class G1MonitoringSupport extends VMObject {
return survivorUsedField.getValue(addr); return survivorUsedField.getValue(addr);
} }
/**
 * Number of regions currently backing the survivor space, derived as
 * survivor bytes used divided by the fixed G1 region size.
 */
public long survivorRegionNum() {
    final long regionSize = HeapRegion.grainBytes();
    return survivorUsed() / regionSize;
}
public long oldCommitted() { public long oldCommitted() {
return oldCommittedField.getValue(addr); return oldCommittedField.getValue(addr);
} }
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject {

  // Mirrors of the C++ HeapRegionSetBase fields in the target VM:
  // size_t _length;
  static private CIntegerField lengthField;
  // size_t _region_num;
  static private CIntegerField regionNumField;
  // size_t _total_used_bytes;
  static private CIntegerField totalUsedBytesField;

  static {
    // Re-resolve the field descriptors each time a VM is attached.
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable o, Object data) {
          initialize(VM.getVM().getTypeDataBase());
        }
      });
  }

  static private synchronized void initialize(TypeDataBase db) {
    Type hrsType = db.lookupType("HeapRegionSetBase");
    lengthField         = hrsType.getCIntegerField("_length");
    regionNumField      = hrsType.getCIntegerField("_region_num");
    totalUsedBytesField = hrsType.getCIntegerField("_total_used_bytes");
  }

  /** Wraps the HeapRegionSetBase instance located at {@code addr} in the target VM. */
  public HeapRegionSetBase(Address addr) {
    super(addr);
  }

  /** Value of the target VM's {@code _length} field. */
  public long length() {
    return lengthField.getValue(addr);
  }

  /** Value of the target VM's {@code _region_num} field (regions in this set). */
  public long regionNum() {
    return regionNumField.getValue(addr);
  }

  /** Value of the target VM's {@code _total_used_bytes} field. */
  public long totalUsedBytes() {
    return totalUsedBytesField.getValue(addr);
  }
}
/* /*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -67,6 +67,7 @@ public class HeapSummary extends Tool { ...@@ -67,6 +67,7 @@ public class HeapSummary extends Tool {
printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap)); printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
printValMB("PermSize = ", getFlagValue("PermSize", flagMap)); printValMB("PermSize = ", getFlagValue("PermSize", flagMap));
printValMB("MaxPermSize = ", getFlagValue("MaxPermSize", flagMap)); printValMB("MaxPermSize = ", getFlagValue("MaxPermSize", flagMap));
printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
System.out.println(); System.out.println();
System.out.println("Heap Usage:"); System.out.println("Heap Usage:");
...@@ -100,11 +101,20 @@ public class HeapSummary extends Tool { ...@@ -100,11 +101,20 @@ public class HeapSummary extends Tool {
} else if (sharedHeap instanceof G1CollectedHeap) { } else if (sharedHeap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap; G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
G1MonitoringSupport g1mm = g1h.g1mm(); G1MonitoringSupport g1mm = g1h.g1mm();
System.out.println("G1 Young Generation"); long edenRegionNum = g1mm.edenRegionNum();
printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted()); long survivorRegionNum = g1mm.survivorRegionNum();
printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted()); HeapRegionSetBase oldSet = g1h.oldSet();
printG1Space("To Space:", 0, 0); HeapRegionSetBase humongousSet = g1h.humongousSet();
printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted()); long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else { } else {
throw new RuntimeException("unknown SharedHeap type : " + heap.getClass()); throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
} }
...@@ -216,9 +226,11 @@ public class HeapSummary extends Tool { ...@@ -216,9 +226,11 @@ public class HeapSummary extends Tool {
System.out.println(alignment + (double)space.used() * 100.0 / space.capacity() + "% used"); System.out.println(alignment + (double)space.used() * 100.0 / space.capacity() + "% used");
} }
private void printG1Space(String spaceName, long used, long capacity) { private void printG1Space(String spaceName, long regionNum,
long used, long capacity) {
long free = capacity - used; long free = capacity - used;
System.out.println(spaceName); System.out.println(spaceName);
printValue("regions = ", regionNum);
printValMB("capacity = ", capacity); printValMB("capacity = ", capacity);
printValMB("used = ", used); printValMB("used = ", used);
printValMB("free = ", free); printValMB("free = ", free);
......
...@@ -208,7 +208,7 @@ TARGETS_ZERO = $(addsuffix zero,$(TARGETS)) ...@@ -208,7 +208,7 @@ TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
TARGETS_SHARK = $(addsuffix shark,$(TARGETS)) TARGETS_SHARK = $(addsuffix shark,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS) BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
......
...@@ -162,20 +162,6 @@ ifndef HOTSPOT_VM_DISTRO ...@@ -162,20 +162,6 @@ ifndef HOTSPOT_VM_DISTRO
endif endif
endif endif
ifeq ($(OS_VENDOR), Darwin)
# MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
ifeq ($(ALWAYS_PASS_TEST_GAMMA),)
# ALWAYS_PASS_TEST_GAMMA wasn't set so we default to true on MacOS X
# until MACOSX_PORT-214 is fixed
ALWAYS_PASS_TEST_GAMMA=true
endif
endif
ifeq ($(ALWAYS_PASS_TEST_GAMMA), true)
TEST_GAMMA_STATUS= echo 'exit 0';
else
TEST_GAMMA_STATUS=
endif
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
BUILDTREE = \ BUILDTREE = \
...@@ -353,12 +339,10 @@ env.sh: $(BUILDTREE_MAKE) ...@@ -353,12 +339,10 @@ env.sh: $(BUILDTREE_MAKE)
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \ [ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
{ \ { \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
echo "export JAVA_HOME LD_LIBRARY_PATH DYLD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
) > $@ ) > $@
env.csh: env.sh env.csh: env.sh
...@@ -412,7 +396,7 @@ JAVA_FLAG/32 = -d32 ...@@ -412,7 +396,7 @@ JAVA_FLAG/32 = -d32
JAVA_FLAG/64 = -d64 JAVA_FLAG/64 = -d64
WRONG_DATA_MODE_MSG = \ WRONG_DATA_MODE_MSG = \
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK." echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
CROSS_COMPILING_MSG = \ CROSS_COMPILING_MSG = \
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run." echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
...@@ -420,20 +404,78 @@ CROSS_COMPILING_MSG = \ ...@@ -420,20 +404,78 @@ CROSS_COMPILING_MSG = \
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
echo '#!/bin/sh'; \ echo "#!/bin/sh"; \
echo ""; \
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
echo '. ./env.sh'; \ echo ""; \
echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \ echo "# Include environment settings for gamma run"; \
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \ echo ""; \
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \ echo ". ./env.sh"; \
echo "then"; \ echo ""; \
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \ echo "# Do not run gamma test for cross compiles"; \
echo ""; \
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
echo " $(CROSS_COMPILING_MSG)"; \
echo " exit 0"; \
echo "fi"; \ echo "fi"; \
echo ""; \
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
echo ""; \
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
echo " $(NO_JAVA_HOME_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Check JAVA_HOME version to be used for the test"; \
echo ""; \
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
echo "if [ \$$? -ne 0 ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Use gamma_g if it exists"; \
echo ""; \
echo "GAMMA_PROG=gamma"; \
echo "if [ -f gamma_g ]; then "; \
echo " GAMMA_PROG=gamma_g"; \
echo "fi"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
echo ""; \
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
echo " fi"; \
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo " fi"; \
echo "fi"; \
echo ""; \
echo "# Compile Queens program for test"; \
echo ""; \
echo "rm -f Queens.class"; \ echo "rm -f Queens.class"; \
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \ echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \ echo ""; \
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \ echo "# Set library path solely for gamma launcher test run"; \
$(TEST_GAMMA_STATUS) \ echo ""; \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "export LD_LIBRARY_PATH"; \
echo "unset LD_LIBRARY_PATH_32"; \
echo "unset LD_LIBRARY_PATH_64"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo " export DYLD_LIBRARY_PATH"; \
echo "fi"; \
echo ""; \
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
echo ""; \
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
) > $@ ) > $@
$(QUIETLY) chmod +x $@ $(QUIETLY) chmod +x $@
......
...@@ -142,6 +142,7 @@ EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html ...@@ -142,6 +142,7 @@ EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so # client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX) EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
ifndef BUILD_CLIENT_ONLY ifndef BUILD_CLIENT_ONLY
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
...@@ -150,7 +151,6 @@ endif ...@@ -150,7 +151,6 @@ endif
ifneq ($(ZERO_BUILD), true) ifneq ($(ZERO_BUILD), true)
ifeq ($(ARCH_DATA_MODEL), 32) ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
endif endif
......
...@@ -50,7 +50,24 @@ ifeq ($(LINK_INTO),AOUT) ...@@ -50,7 +50,24 @@ ifeq ($(LINK_INTO),AOUT)
LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS) LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS)
else else
LAUNCHER.o = launcher.o LAUNCHER.o = launcher.o
LFLAGS_LAUNCHER += -L`pwd` LFLAGS_LAUNCHER += -L`pwd`
# The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a
# freshly built JVM at ./libjvm.{so|dylib}. This is accomplished by setting
# the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM
# first. Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is
# statically linked with CoreFoundation framework libs. Unfortunately, gamma's
# unique searchpath results in some unresolved symbols in the framework
# libraries, because JDK libraries are inadvertently discovered first on the
# searchpath, e.g. libjpeg. On Mac OS X, filenames are case *insensitive*.
# So, the actual filename collision is libjpeg.dylib and libJPEG.dylib.
# To resolve this, gamma needs to also statically link with the CoreFoundation
# framework libraries.
ifeq ($(OS_VENDOR),Darwin)
LFLAGS_LAUNCHER += -framework CoreFoundation
endif
LIBS_LAUNCHER += -l$(JVM) $(LIBS) LIBS_LAUNCHER += -l$(JVM) $(LIBS)
endif endif
......
...@@ -337,8 +337,8 @@ ifeq ($(OS_VENDOR), Darwin) ...@@ -337,8 +337,8 @@ ifeq ($(OS_VENDOR), Darwin)
$(LIBJVM).dSYM: $(LIBJVM) $(LIBJVM).dSYM: $(LIBJVM)
dsymutil $(LIBJVM) dsymutil $(LIBJVM)
# no launcher or libjvm_db for macosx # no libjvm_db for macosx
build: $(LIBJVM) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
echo "Doing vm.make build:" echo "Doing vm.make build:"
else else
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
......
...@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011 ...@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=23 HS_MAJOR_VER=23
HS_MINOR_VER=0 HS_MINOR_VER=0
HS_BUILD_NUMBER=11 HS_BUILD_NUMBER=12
JDK_MAJOR_VER=1 JDK_MAJOR_VER=1
JDK_MINOR_VER=8 JDK_MINOR_VER=8
......
...@@ -174,6 +174,10 @@ jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6 ...@@ -174,6 +174,10 @@ jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6 jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}} jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1 jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1 jprt.my.windows.i586.jdk7=windows_i586_5.1
jprt.my.windows.i586.jdk7b107=windows_i586_5.0 jprt.my.windows.i586.jdk7b107=windows_i586_5.0
...@@ -211,6 +215,7 @@ jprt.build.targets.standard= \ ...@@ -211,6 +215,7 @@ jprt.build.targets.standard= \
${jprt.my.solaris.x64}-{product|fastdebug|debug}, \ ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \
${jprt.my.linux.i586}-{product|fastdebug|debug}, \ ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
${jprt.my.linux.x64}-{product|fastdebug}, \ ${jprt.my.linux.x64}-{product|fastdebug}, \
${jprt.my.macosx.x64}-{product|fastdebug|debug}, \
${jprt.my.windows.i586}-{product|fastdebug|debug}, \ ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
${jprt.my.windows.x64}-{product|fastdebug|debug} ${jprt.my.windows.x64}-{product|fastdebug|debug}
...@@ -416,6 +421,30 @@ jprt.my.linux.x64.test.targets = \ ...@@ -416,6 +421,30 @@ jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.macosx.x64.test.targets = \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.windows.i586.test.targets = \ jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
...@@ -492,6 +521,7 @@ jprt.test.targets.standard = \ ...@@ -492,6 +521,7 @@ jprt.test.targets.standard = \
${jprt.my.solaris.x64.test.targets}, \ ${jprt.my.solaris.x64.test.targets}, \
${jprt.my.linux.i586.test.targets}, \ ${jprt.my.linux.i586.test.targets}, \
${jprt.my.linux.x64.test.targets}, \ ${jprt.my.linux.x64.test.targets}, \
${jprt.my.macosx.x64.test.targets}, \
${jprt.my.windows.i586.test.targets}, \ ${jprt.my.windows.i586.test.targets}, \
${jprt.my.windows.x64.test.targets}, \ ${jprt.my.windows.x64.test.targets}, \
${jprt.test.targets.open} ${jprt.test.targets.open}
...@@ -538,6 +568,7 @@ jprt.make.rule.test.targets.standard.server = \ ...@@ -538,6 +568,7 @@ jprt.make.rule.test.targets.standard.server = \
${jprt.my.solaris.x64}-*-c2-servertest, \ ${jprt.my.solaris.x64}-*-c2-servertest, \
${jprt.my.linux.i586}-*-c2-servertest, \ ${jprt.my.linux.i586}-*-c2-servertest, \
${jprt.my.linux.x64}-*-c2-servertest, \ ${jprt.my.linux.x64}-*-c2-servertest, \
${jprt.my.macosx.x64}-*-c2-servertest, \
${jprt.my.windows.i586}-*-c2-servertest, \ ${jprt.my.windows.i586}-*-c2-servertest, \
${jprt.my.windows.x64}-*-c2-servertest ${jprt.my.windows.x64}-*-c2-servertest
...@@ -548,6 +579,7 @@ jprt.make.rule.test.targets.standard.internalvmtests = \ ...@@ -548,6 +579,7 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \ ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \ ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \ ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \ ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
......
...@@ -326,11 +326,10 @@ env.sh: $(BUILDTREE_MAKE) ...@@ -326,11 +326,10 @@ env.sh: $(BUILDTREE_MAKE)
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \ [ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
{ \ { \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
) > $@ ) > $@
env.csh: env.sh env.csh: env.sh
...@@ -384,7 +383,7 @@ JAVA_FLAG/32 = -d32 ...@@ -384,7 +383,7 @@ JAVA_FLAG/32 = -d32
JAVA_FLAG/64 = -d64 JAVA_FLAG/64 = -d64
WRONG_DATA_MODE_MSG = \ WRONG_DATA_MODE_MSG = \
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK." echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
CROSS_COMPILING_MSG = \ CROSS_COMPILING_MSG = \
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run." echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
...@@ -392,19 +391,78 @@ CROSS_COMPILING_MSG = \ ...@@ -392,19 +391,78 @@ CROSS_COMPILING_MSG = \
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
echo '#!/bin/sh'; \ echo "#!/bin/sh"; \
echo ""; \
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
echo '. ./env.sh'; \ echo ""; \
echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \ echo "# Include environment settings for gamma run"; \
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \ echo ""; \
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \ echo ". ./env.sh"; \
echo "then"; \ echo ""; \
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \ echo "# Do not run gamma test for cross compiles"; \
echo ""; \
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
echo " $(CROSS_COMPILING_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
echo ""; \
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
echo " $(NO_JAVA_HOME_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Check JAVA_HOME version to be used for the test"; \
echo ""; \
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
echo "if [ \$$? -ne 0 ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo "fi"; \ echo "fi"; \
echo ""; \
echo "# Use gamma_g if it exists"; \
echo ""; \
echo "GAMMA_PROG=gamma"; \
echo "if [ -f gamma_g ]; then "; \
echo " GAMMA_PROG=gamma_g"; \
echo "fi"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
echo ""; \
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
echo " fi"; \
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo " fi"; \
echo "fi"; \
echo ""; \
echo "# Compile Queens program for test"; \
echo ""; \
echo "rm -f Queens.class"; \ echo "rm -f Queens.class"; \
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \ echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \ echo ""; \
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \ echo "# Set library path solely for gamma launcher test run"; \
echo ""; \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "export LD_LIBRARY_PATH"; \
echo "unset LD_LIBRARY_PATH_32"; \
echo "unset LD_LIBRARY_PATH_64"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo " export DYLD_LIBRARY_PATH"; \
echo "fi"; \
echo ""; \
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
echo ""; \
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
) > $@ ) > $@
$(QUIETLY) chmod +x $@ $(QUIETLY) chmod +x $@
......
...@@ -118,7 +118,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) ...@@ -118,7 +118,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
env.ksh env.csh jdkpath.sh .dbxrc test_gamma env.sh env.csh jdkpath.sh .dbxrc test_gamma
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
...@@ -313,22 +313,19 @@ sa.make: $(BUILDTREE_MAKE) ...@@ -313,22 +313,19 @@ sa.make: $(BUILDTREE_MAKE)
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@ ) > $@
env.ksh: $(BUILDTREE_MAKE) env.sh: $(BUILDTREE_MAKE)
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \ [ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
{ \ { \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "unset LD_LIBRARY_PATH_32"; \
echo "unset LD_LIBRARY_PATH_64"; \
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
) > $@ ) > $@
env.csh: env.ksh env.csh: env.sh
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
...@@ -384,23 +381,86 @@ JAVA_FLAG/32 = -d32 ...@@ -384,23 +381,86 @@ JAVA_FLAG/32 = -d32
JAVA_FLAG/64 = -d64 JAVA_FLAG/64 = -d64
WRONG_DATA_MODE_MSG = \ WRONG_DATA_MODE_MSG = \
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK." echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
CROSS_COMPILING_MSG = \
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
echo '#!/bin/ksh'; \ echo "#!/bin/sh"; \
echo ""; \
$(BUILDTREE_COMMENT); \ $(BUILDTREE_COMMENT); \
echo '. ./env.ksh'; \ echo ""; \
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \ echo "# Include environment settings for gamma run"; \
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \ echo ""; \
echo "then"; \ echo ". ./env.sh"; \
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \ echo ""; \
echo "# Do not run gamma test for cross compiles"; \
echo ""; \
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
echo " $(CROSS_COMPILING_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
echo ""; \
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
echo " $(NO_JAVA_HOME_MSG)"; \
echo " exit 0"; \
echo "fi"; \
echo ""; \
echo "# Check JAVA_HOME version to be used for the test"; \
echo ""; \
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
echo "if [ \$$? -ne 0 ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo "fi"; \ echo "fi"; \
echo ""; \
echo "# Use gamma_g if it exists"; \
echo ""; \
echo "GAMMA_PROG=gamma"; \
echo "if [ -f gamma_g ]; then "; \
echo " GAMMA_PROG=gamma_g"; \
echo "fi"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
echo ""; \
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
echo " fi"; \
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
echo " $(WRONG_DATA_MODE_MSG)"; \
echo " exit 0"; \
echo " fi"; \
echo "fi"; \
echo ""; \
echo "# Compile Queens program for test"; \
echo ""; \
echo "rm -f Queens.class"; \ echo "rm -f Queens.class"; \
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \ echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \ echo ""; \
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \ echo "# Set library path solely for gamma launcher test run"; \
echo ""; \
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo "export LD_LIBRARY_PATH"; \
echo "unset LD_LIBRARY_PATH_32"; \
echo "unset LD_LIBRARY_PATH_64"; \
echo ""; \
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
echo " export DYLD_LIBRARY_PATH"; \
echo "fi"; \
echo ""; \
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
echo ""; \
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
) > $@ ) > $@
$(QUIETLY) chmod +x $@ $(QUIETLY) chmod +x $@
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -810,7 +810,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const { ...@@ -810,7 +810,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
} }
#ifdef ASSERT #ifndef PRODUCT
#define DESCRIBE_FP_OFFSET(name) \ #define DESCRIBE_FP_OFFSET(name) \
values.describe(frame_no, fp() + frame::name##_offset, #name) values.describe(frame_no, fp() + frame::name##_offset, #name)
...@@ -820,11 +820,19 @@ void frame::describe_pd(FrameValues& values, int frame_no) { ...@@ -820,11 +820,19 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1); values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
} }
if (is_interpreted_frame()) { if (is_ricochet_frame()) {
MethodHandles::RicochetFrame::describe(this, values, frame_no);
} else if (is_interpreted_frame()) {
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp); DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp); DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_padding); DESCRIBE_FP_OFFSET(interpreter_frame_padding);
DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp); DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
// esp, according to Lesp (e.g. not depending on bci), if seems valid
intptr_t* esp = *interpreter_frame_esp_addr();
if ((esp >= sp()) && (esp < fp())) {
values.describe(-1, esp, "*Lesp");
}
} }
if (!is_compiled_frame()) { if (!is_compiled_frame()) {
...@@ -844,4 +852,3 @@ intptr_t *frame::initial_deoptimization_info() { ...@@ -844,4 +852,3 @@ intptr_t *frame::initial_deoptimization_info() {
// unused... but returns fp() to minimize changes introduced by 7087445 // unused... but returns fp() to minimize changes introduced by 7087445
return fp(); return fp();
} }
/* /*
* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -177,7 +177,7 @@ void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, ...@@ -177,7 +177,7 @@ void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
BLOCK_COMMENT("ricochet_blob.bounce"); BLOCK_COMMENT("ricochet_blob.bounce");
if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
trace_method_handle(_masm, "ricochet_blob.bounce"); trace_method_handle(_masm, "return/ricochet_blob.bounce");
__ JMP(L1_continuation, 0); __ JMP(L1_continuation, 0);
__ delayed()->nop(); __ delayed()->nop();
...@@ -268,14 +268,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, ...@@ -268,14 +268,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
} }
// Emit code to verify that FP is pointing at a valid ricochet frame. // Emit code to verify that FP is pointing at a valid ricochet frame.
#ifdef ASSERT #ifndef PRODUCT
enum { enum {
ARG_LIMIT = 255, SLOP = 45, ARG_LIMIT = 255, SLOP = 45,
// use this parameter for checking for garbage stack movements: // use this parameter for checking for garbage stack movements:
UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
// the slop defends against false alarms due to fencepost errors // the slop defends against false alarms due to fencepost errors
}; };
#endif
#ifdef ASSERT
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
// The stack should look like this: // The stack should look like this:
// ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF] // ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
...@@ -1000,32 +1002,143 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, ...@@ -1000,32 +1002,143 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
BLOCK_COMMENT("} move_return_value"); BLOCK_COMMENT("} move_return_value");
} }
#ifndef PRODUCT
void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
RicochetFrame* rf = new RicochetFrame(*fr);
// ricochet slots (kept in registers for sparc)
values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));
// relevant ricochet targets (in caller frame)
values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
}
#endif // ASSERT
#ifndef PRODUCT #ifndef PRODUCT
extern "C" void print_method_handle(oop mh); extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername, void trace_method_handle_stub(const char* adaptername,
oopDesc* mh, oopDesc* mh,
intptr_t* saved_sp) { intptr_t* saved_sp,
intptr_t* args,
intptr_t* tracing_fp) {
bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh
tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
if (has_mh) tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
if (Verbose) {
// dumping last frame with frame::describe
JavaThread* p = JavaThread::active();
ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
// While trace_method_handle creates a frame, it may be entered
// without a valid return PC in O7 (e.g. not just after a call).
// Walking that frame could lead to failures due to that invalid PC.
// => carefully detect that frame when doing the stack walking
// walk up to the right frame using the "tracing_fp" argument
intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
frame cur_frame(cur_sp, frame::unpatchable, NULL);
while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
cur_frame = os::get_sender_for_C_frame(&cur_frame);
}
// safely create a frame and call frame::describe
intptr_t *dump_sp = cur_frame.sender_sp();
intptr_t *dump_fp = cur_frame.link();
bool walkable = has_mh; // whether the traced frame shoud be walkable
// the sender for cur_frame is the caller of trace_method_handle
if (walkable) {
// The previous definition of walkable may have to be refined
// if new call sites cause the next frame constructor to start
// failing. Alternatively, frame constructors could be
// modified to support the current or future non walkable
// frames (but this is more intrusive and is not considered as
// part of this RFE, which will instead use a simpler output).
frame dump_frame = frame(dump_sp,
cur_frame.sp(), // younger_sp
false); // no adaptation
dump_frame.describe(values, 1);
} else {
// Robust dump for frames which cannot be constructed from sp/younger_sp
// Add descriptions without building a Java frame to avoid issues
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
values.describe(-1, dump_sp, "sp");
}
bool has_args = has_mh; // whether Gargs is meaningful
// mark args, if seems valid (may not be valid for some adapters)
if (has_args) {
if ((args >= dump_sp) && (args < dump_fp)) {
values.describe(-1, args, "*G4_args");
}
}
// mark saved_sp, if seems valid (may not be valid for some adapters)
intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
}
// Note: the unextended_sp may not be correct
tty->print_cr(" stack layout:");
values.print(p);
}
if (has_mh) {
print_method_handle(mh); print_method_handle(mh);
}
} }
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return; if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {"); BLOCK_COMMENT("trace_method_handle {");
// save: Gargs, O5_savedSP // save: Gargs, O5_savedSP
__ save_frame(16); __ save_frame(16); // need space for saving required FPU state
__ set((intptr_t) adaptername, O0); __ set((intptr_t) adaptername, O0);
__ mov(G3_method_handle, O1); __ mov(G3_method_handle, O1);
__ mov(I5_savedSP, O2); __ mov(I5_savedSP, O2);
__ mov(Gargs, O3);
__ mov(I6, O4); // frame identifier for safe stack walking
// Save scratched registers that might be needed. Robustness is more
// important than optimizing the saves for this debug only code.
// save FP result, valid at some call sites (adapter_opt_return_float, ...)
Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
__ stf(FloatRegisterImpl::D, Ftos_d, d_save);
// Safely save all globals but G2 (handled by call_VM_leaf) and G7
// (OS reserved).
__ mov(G3_method_handle, L3); __ mov(G3_method_handle, L3);
__ mov(Gargs, L4); __ mov(Gargs, L4);
__ mov(G5_method_type, L5); __ mov(G5_method_type, L5);
__ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub)); __ mov(G6, L6);
__ mov(G1, L1);
__ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ mov(L3, G3_method_handle); __ mov(L3, G3_method_handle);
__ mov(L4, Gargs); __ mov(L4, Gargs);
__ mov(L5, G5_method_type); __ mov(L5, G5_method_type);
__ mov(L6, G6);
__ mov(L1, G1);
__ ldf(FloatRegisterImpl::D, d_save, Ftos_d);
__ restore(); __ restore();
BLOCK_COMMENT("} trace_method_handle"); BLOCK_COMMENT("} trace_method_handle");
} }
...@@ -1250,7 +1363,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan ...@@ -1250,7 +1363,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
move_typed_arg(_masm, arg_type, false, move_typed_arg(_masm, arg_type, false,
prim_value_addr, prim_value_addr,
Address(O0_argslot, 0), Address(O0_argslot, 0),
O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
} }
if (direct_to_method) { if (direct_to_method) {
......
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -145,6 +145,8 @@ class RicochetFrame : public ResourceObj { ...@@ -145,6 +145,8 @@ class RicochetFrame : public ResourceObj {
} }
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
}; };
// Additional helper methods for MethodHandles code generation: // Additional helper methods for MethodHandles code generation:
......
...@@ -651,13 +651,15 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const { ...@@ -651,13 +651,15 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[index]; return &interpreter_frame_tos_address()[index];
} }
#ifdef ASSERT #ifndef PRODUCT
#define DESCRIBE_FP_OFFSET(name) \ #define DESCRIBE_FP_OFFSET(name) \
values.describe(frame_no, fp() + frame::name##_offset, #name) values.describe(frame_no, fp() + frame::name##_offset, #name)
void frame::describe_pd(FrameValues& values, int frame_no) { void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) { if (is_ricochet_frame()) {
MethodHandles::RicochetFrame::describe(this, values, frame_no);
} else if (is_interpreted_frame()) {
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method); DESCRIBE_FP_OFFSET(interpreter_frame_method);
...@@ -667,7 +669,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) { ...@@ -667,7 +669,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
DESCRIBE_FP_OFFSET(interpreter_frame_bcx); DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp); DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
} }
} }
#endif #endif
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -279,14 +279,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, ...@@ -279,14 +279,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
} }
// Emit code to verify that RBP is pointing at a valid ricochet frame. // Emit code to verify that RBP is pointing at a valid ricochet frame.
#ifdef ASSERT #ifndef PRODUCT
enum { enum {
ARG_LIMIT = 255, SLOP = 4, ARG_LIMIT = 255, SLOP = 4,
// use this parameter for checking for garbage stack movements: // use this parameter for checking for garbage stack movements:
UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
// the slop defends against false alarms due to fencepost errors // the slop defends against false alarms due to fencepost errors
}; };
#endif
#ifdef ASSERT
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
// The stack should look like this: // The stack should look like this:
// ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args | // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
...@@ -990,6 +992,26 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, ...@@ -990,6 +992,26 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
BLOCK_COMMENT("} move_return_value"); BLOCK_COMMENT("} move_return_value");
} }
#ifndef PRODUCT
#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
address bp = (address) fr->fp();
RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
// ricochet slots
DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
DESCRIBE_RICOCHET_OFFSET(rf, conversion);
DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
DESCRIBE_RICOCHET_OFFSET(rf, continuation);
// relevant ricochet targets (in caller frame)
values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
}
#endif // ASSERT
#ifndef PRODUCT #ifndef PRODUCT
extern "C" void print_method_handle(oop mh); extern "C" void print_method_handle(oop mh);
...@@ -1001,6 +1023,7 @@ void trace_method_handle_stub(const char* adaptername, ...@@ -1001,6 +1023,7 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* saved_bp) { intptr_t* saved_bp) {
// called as a leaf from native code: do not block the JVM! // called as a leaf from native code: do not block the JVM!
bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset]; intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
intptr_t* base_sp = last_sp; intptr_t* base_sp = last_sp;
typedef MethodHandles::RicochetFrame RicochetFrame; typedef MethodHandles::RicochetFrame RicochetFrame;
...@@ -1030,13 +1053,64 @@ void trace_method_handle_stub(const char* adaptername, ...@@ -1030,13 +1053,64 @@ void trace_method_handle_stub(const char* adaptername,
tty->cr(); tty->cr();
if (last_sp != saved_sp && last_sp != NULL) if (last_sp != saved_sp && last_sp != NULL)
tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp); tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
int stack_dump_count = 16;
if (stack_dump_count < (int)(saved_bp + 2 - saved_sp)) {
stack_dump_count = (int)(saved_bp + 2 - saved_sp); // dumping last frame with frame::describe
if (stack_dump_count > 64) stack_dump_count = 48;
for (i = 0; i < stack_dump_count; i += 4) { JavaThread* p = JavaThread::active();
tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]); ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
// While trace_method_handle creates a frame, it may be entered
// without a PC on the stack top (e.g. not just after a call).
// Walking that frame could lead to failures due to that invalid PC.
// => carefully detect that frame when doing the stack walking
// Current C frame
frame cur_frame = os::current_frame();
// Robust search of trace_calling_frame (independant of inlining).
// Assumes saved_regs comes from a pusha in the trace_calling_frame.
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
while (trace_calling_frame.fp() < saved_regs) {
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
// safely create a frame and call frame::describe
intptr_t *dump_sp = trace_calling_frame.sender_sp();
intptr_t *dump_fp = trace_calling_frame.link();
bool walkable = has_mh; // whether the traced frame shoud be walkable
if (walkable) {
// The previous definition of walkable may have to be refined
// if new call sites cause the next frame constructor to start
// failing. Alternatively, frame constructors could be
// modified to support the current or future non walkable
// frames (but this is more intrusive and is not considered as
// part of this RFE, which will instead use a simpler output).
frame dump_frame = frame(dump_sp, dump_fp);
dump_frame.describe(values, 1);
} else {
// Stack may not be walkable (invalid PC above FP):
// Add descriptions without building a Java frame to avoid issues
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
values.describe(-1, dump_sp, "sp for #1");
}
// mark saved_sp if seems valid
if (has_mh) {
if ((saved_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
values.describe(-1, saved_sp, "*saved_sp");
}
}
tty->print_cr(" stack layout:");
values.print(p);
} }
if (has_mh) if (has_mh)
print_method_handle(mh); print_method_handle(mh);
...@@ -1066,26 +1140,49 @@ void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { ...@@ -1066,26 +1140,49 @@ void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return; if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {"); BLOCK_COMMENT("trace_method_handle {");
__ push(rax);
__ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp __ pusha();
__ pusha();
__ mov(rbx, rsp);
__ enter(); __ enter();
__ andptr(rsp, -16); // align stack if needed for FPU state
__ pusha();
__ mov(rbx, rsp); // for retreiving saved_regs
// Note: saved_regs must be in the entered frame for the
// robust stack walking implemented in trace_method_handle_stub.
// save FP result, valid at some call sites (adapter_opt_return_float, ...)
__ increment(rsp, -2 * wordSize);
if (UseSSE >= 2) {
__ movdbl(Address(rsp, 0), xmm0);
} else if (UseSSE == 1) {
__ movflt(Address(rsp, 0), xmm0);
} else {
__ fst_d(Address(rsp, 0));
}
// incoming state: // incoming state:
// rcx: method handle // rcx: method handle
// r13 or rsi: saved sp // r13 or rsi: saved sp
// To avoid calling convention issues, build a record on the stack and pass the pointer to that instead. // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
// Note: fix the increment below if pushing more arguments
__ push(rbp); // saved_bp __ push(rbp); // saved_bp
__ push(rsi); // saved_sp __ push(saved_last_sp_register()); // saved_sp
__ push(rax); // entry_sp __ push(rbp); // entry_sp (with extra align space)
__ push(rbx); // pusha saved_regs __ push(rbx); // pusha saved_regs
__ push(rcx); // mh __ push(rcx); // mh
__ push(rcx); // adaptername __ push(rcx); // slot for adaptername
__ movptr(Address(rsp, 0), (intptr_t) adaptername); __ movptr(Address(rsp, 0), (intptr_t) adaptername);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
__ leave(); __ increment(rsp, 6 * wordSize); // MethodHandleStubArguments
if (UseSSE >= 2) {
__ movdbl(xmm0, Address(rsp, 0));
} else if (UseSSE == 1) {
__ movflt(xmm0, Address(rsp, 0));
} else {
__ fld_d(Address(rsp, 0));
}
__ increment(rsp, 2 * wordSize);
__ popa(); __ popa();
__ pop(rax); __ leave();
BLOCK_COMMENT("} trace_method_handle"); BLOCK_COMMENT("} trace_method_handle");
} }
#endif //PRODUCT #endif //PRODUCT
......
/* /*
* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -224,6 +224,8 @@ class RicochetFrame { ...@@ -224,6 +224,8 @@ class RicochetFrame {
} }
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
}; };
// Additional helper methods for MethodHandles code generation: // Additional helper methods for MethodHandles code generation:
......
...@@ -418,7 +418,7 @@ void ZeroFrame::identify_vp_word(int frame_index, ...@@ -418,7 +418,7 @@ void ZeroFrame::identify_vp_word(int frame_index,
} }
} }
#ifdef ASSERT #ifndef PRODUCT
void frame::describe_pd(FrameValues& values, int frame_no) { void frame::describe_pd(FrameValues& values, int frame_no) {
......
/* /*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -301,6 +301,12 @@ static char cpu_arch[] = "sparc"; ...@@ -301,6 +301,12 @@ static char cpu_arch[] = "sparc";
#error Add appropriate cpu_arch setting #error Add appropriate cpu_arch setting
#endif #endif
// Compiler variant
#ifdef COMPILER2
#define COMPILER_VARIANT "server"
#else
#define COMPILER_VARIANT "client"
#endif
#ifndef _ALLBSD_SOURCE #ifndef _ALLBSD_SOURCE
// pid_t gettid() // pid_t gettid()
...@@ -2507,7 +2513,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { ...@@ -2507,7 +2513,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
static char saved_jvm_path[MAXPATHLEN] = {0}; static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so or libjvm_g.so // Find the full path to the current module, libjvm or libjvm_g
void os::jvm_path(char *buf, jint buflen) { void os::jvm_path(char *buf, jint buflen) {
// Error checking. // Error checking.
if (buflen < MAXPATHLEN) { if (buflen < MAXPATHLEN) {
...@@ -2532,11 +2538,11 @@ void os::jvm_path(char *buf, jint buflen) { ...@@ -2532,11 +2538,11 @@ void os::jvm_path(char *buf, jint buflen) {
if (Arguments::created_by_gamma_launcher()) { if (Arguments::created_by_gamma_launcher()) {
// Support for the gamma launcher. Typical value for buf is // Support for the gamma launcher. Typical value for buf is
// "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm". If "/jre/lib/" appears at
// the right place in the string, then assume we are installed in a JDK and // the right place in the string, then assume we are installed in a JDK and
// we're done. Otherwise, check for a JAVA_HOME environment variable and fix // we're done. Otherwise, check for a JAVA_HOME environment variable and
// up the path so it looks like libjvm.so is installed there (append a // construct a path to the JVM being overridden.
// fake suffix hotspot/libjvm.so).
const char *p = buf + strlen(buf) - 1; const char *p = buf + strlen(buf) - 1;
for (int count = 0; p > buf && count < 5; ++count) { for (int count = 0; p > buf && count < 5; ++count) {
for (--p; p > buf && *p != '/'; --p) for (--p; p > buf && *p != '/'; --p)
...@@ -2550,7 +2556,7 @@ void os::jvm_path(char *buf, jint buflen) { ...@@ -2550,7 +2556,7 @@ void os::jvm_path(char *buf, jint buflen) {
char* jrelib_p; char* jrelib_p;
int len; int len;
// Check the current module name "libjvm.so" or "libjvm_g.so". // Check the current module name "libjvm" or "libjvm_g".
p = strrchr(buf, '/'); p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name"); assert(strstr(p, "/libjvm") == p, "invalid library name");
p = strstr(p, "_g") ? "_g" : ""; p = strstr(p, "_g") ? "_g" : "";
...@@ -2563,19 +2569,32 @@ void os::jvm_path(char *buf, jint buflen) { ...@@ -2563,19 +2569,32 @@ void os::jvm_path(char *buf, jint buflen) {
// modules image doesn't have "jre" subdirectory // modules image doesn't have "jre" subdirectory
len = strlen(buf); len = strlen(buf);
jrelib_p = buf + len; jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
// Add the appropriate library subdir
snprintf(jrelib_p, buflen-len, "/jre/lib");
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib");
}
// Add the appropriate client or server subdir
len = strlen(buf);
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
if (0 != access(buf, F_OK)) { if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); snprintf(jrelib_p, buflen-len, "");
} }
// If the path exists within JAVA_HOME, add the JVM library name
// to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) { if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of // Use current module name "libjvm[_g]" instead of
// "libjvm"debug_only("_g")".so" since for fastdebug version // "libjvm"debug_only("_g")"" since for fastdebug version
// we should have "libjvm.so" but debug_only("_g") adds "_g"! // we should have "libjvm" but debug_only("_g") adds "_g"!
len = strlen(buf); len = strlen(buf);
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); snprintf(buf + len, buflen-len, "/libjvm%s%s", p, JNI_LIB_SUFFIX);
} else { } else {
// Go back to path of .so // Fall back to path of current library
rp = realpath(dli_fname, buf); rp = realpath(dli_fname, buf);
if (rp == NULL) if (rp == NULL)
return; return;
...@@ -3570,26 +3589,28 @@ void os::loop_breaker(int attempts) { ...@@ -3570,26 +3589,28 @@ void os::loop_breaker(int attempts) {
// It is only used when ThreadPriorityPolicy=1 and requires root privilege. // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) #if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
int os::java_to_os_priority[MaxPriority + 1] = { int os::java_to_os_priority[CriticalPriority + 1] = {
19, // 0 Entry should never be used 19, // 0 Entry should never be used
0, // 1 MinPriority 0, // 1 MinPriority
3, // 2 3, // 2
6, // 3 6, // 3
10, // 4 10, // 4
15, // 5 NormPriority 15, // 5 NormPriority
18, // 6 18, // 6
21, // 7
25, // 8
28, // 9 NearMaxPriority
21, // 7 31, // 10 MaxPriority
25, // 8
28, // 9 NearMaxPriority
31 // 10 MaxPriority 31 // 11 CriticalPriority
}; };
#elif defined(__APPLE__) #elif defined(__APPLE__)
/* Using Mach high-level priority assignments */ /* Using Mach high-level priority assignments */
int os::java_to_os_priority[MaxPriority + 1] = { int os::java_to_os_priority[CriticalPriority + 1] = {
0, // 0 Entry should never be used (MINPRI_USER) 0, // 0 Entry should never be used (MINPRI_USER)
27, // 1 MinPriority 27, // 1 MinPriority
...@@ -3604,10 +3625,12 @@ int os::java_to_os_priority[MaxPriority + 1] = { ...@@ -3604,10 +3625,12 @@ int os::java_to_os_priority[MaxPriority + 1] = {
34, // 8 34, // 8
35, // 9 NearMaxPriority 35, // 9 NearMaxPriority
36 // 10 MaxPriority 36, // 10 MaxPriority
36 // 11 CriticalPriority
}; };
#else #else
int os::java_to_os_priority[MaxPriority + 1] = { int os::java_to_os_priority[CriticalPriority + 1] = {
19, // 0 Entry should never be used 19, // 0 Entry should never be used
4, // 1 MinPriority 4, // 1 MinPriority
...@@ -3622,7 +3645,9 @@ int os::java_to_os_priority[MaxPriority + 1] = { ...@@ -3622,7 +3645,9 @@ int os::java_to_os_priority[MaxPriority + 1] = {
-3, // 8 -3, // 8
-4, // 9 NearMaxPriority -4, // 9 NearMaxPriority
-5 // 10 MaxPriority -5, // 10 MaxPriority
-5 // 11 CriticalPriority
}; };
#endif #endif
...@@ -3638,6 +3663,9 @@ static int prio_init() { ...@@ -3638,6 +3663,9 @@ static int prio_init() {
ThreadPriorityPolicy = 0; ThreadPriorityPolicy = 0;
} }
} }
if (UseCriticalJavaThreadPriority) {
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
}
return 0; return 0;
} }
......
/* /*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -3383,7 +3383,7 @@ void os::loop_breaker(int attempts) { ...@@ -3383,7 +3383,7 @@ void os::loop_breaker(int attempts) {
// this reason, the code should not be used as default (ThreadPriorityPolicy=0). // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege. // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
int os::java_to_os_priority[MaxPriority + 1] = { int os::java_to_os_priority[CriticalPriority + 1] = {
19, // 0 Entry should never be used 19, // 0 Entry should never be used
4, // 1 MinPriority 4, // 1 MinPriority
...@@ -3398,7 +3398,9 @@ int os::java_to_os_priority[MaxPriority + 1] = { ...@@ -3398,7 +3398,9 @@ int os::java_to_os_priority[MaxPriority + 1] = {
-3, // 8 -3, // 8
-4, // 9 NearMaxPriority -4, // 9 NearMaxPriority
-5 // 10 MaxPriority -5, // 10 MaxPriority
-5 // 11 CriticalPriority
}; };
static int prio_init() { static int prio_init() {
...@@ -3413,6 +3415,9 @@ static int prio_init() { ...@@ -3413,6 +3415,9 @@ static int prio_init() {
ThreadPriorityPolicy = 0; ThreadPriorityPolicy = 0;
} }
} }
if (UseCriticalJavaThreadPriority) {
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
}
return 0; return 0;
} }
......
...@@ -701,6 +701,14 @@ GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative) ...@@ -701,6 +701,14 @@ GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative)
char libjava[MAXPATHLEN]; char libjava[MAXPATHLEN];
if (GetApplicationHome(path, pathsize)) { if (GetApplicationHome(path, pathsize)) {
/* Is the JRE universal, i.e. no arch dir? */
sprintf(libjava, "%s/jre/lib/" JAVA_DLL, path);
if (access(libjava, F_OK) == 0) {
strcat(path, "/jre");
goto found;
}
/* Is JRE co-located with the application? */ /* Is JRE co-located with the application? */
sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch); sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
if (access(libjava, F_OK) == 0) { if (access(libjava, F_OK) == 0) {
...@@ -734,7 +742,7 @@ LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn) ...@@ -734,7 +742,7 @@ LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn)
ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs; ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs;
return JNI_TRUE; return JNI_TRUE;
#else #else
Dl_info dlinfo; Dl_info dlinfo;
void *libjvm; void *libjvm;
if (_launcher_debug) { if (_launcher_debug) {
......
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -28,17 +28,17 @@ ...@@ -28,17 +28,17 @@
// This is embedded via include into the class OSThread // This is embedded via include into the class OSThread
private: private:
thread_t _thread_id; // Solaris thread id
thread_t _thread_id; // Solaris thread id uint _lwp_id; // lwp ID, only used with bound threads
unsigned int _lwp_id; // lwp ID, only used with bound threads int _native_priority; // Saved native priority when starting
sigset_t _caller_sigmask; // Caller's signal mask // a bound thread
bool _vm_created_thread; // true if the VM create this thread sigset_t _caller_sigmask; // Caller's signal mask
// false if primary thread or attached thread bool _vm_created_thread; // true if the VM created this thread,
// false if primary thread or attached thread
public: public:
thread_t thread_id() const { return _thread_id; }
thread_t thread_id() const { return _thread_id; } uint lwp_id() const { return _lwp_id; }
int native_priority() const { return _native_priority; }
unsigned int lwp_id() const { return _lwp_id; }
// Set and get state of _vm_created_thread flag // Set and get state of _vm_created_thread flag
void set_vm_created() { _vm_created_thread = true; } void set_vm_created() { _vm_created_thread = true; }
...@@ -62,8 +62,9 @@ ...@@ -62,8 +62,9 @@
return true; return true;
} }
#endif #endif
void set_thread_id(thread_t id) { _thread_id = id; } void set_thread_id(thread_t id) { _thread_id = id; }
void set_lwp_id(unsigned int id){ _lwp_id = id; } void set_lwp_id(uint id) { _lwp_id = id; }
void set_native_priority(int prio) { _native_priority = prio; }
// *************************************************************** // ***************************************************************
// interrupt support. interrupts (using signals) are used to get // interrupt support. interrupts (using signals) are used to get
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -114,6 +114,7 @@ ...@@ -114,6 +114,7 @@
# include <sys/rtpriocntl.h> # include <sys/rtpriocntl.h>
# include <sys/tspriocntl.h> # include <sys/tspriocntl.h>
# include <sys/iapriocntl.h> # include <sys/iapriocntl.h>
# include <sys/fxpriocntl.h>
# include <sys/loadavg.h> # include <sys/loadavg.h>
# include <string.h> # include <string.h>
# include <stdio.h> # include <stdio.h>
...@@ -129,8 +130,8 @@ ...@@ -129,8 +130,8 @@
#ifdef _GNU_SOURCE #ifdef _GNU_SOURCE
// See bug #6514594 // See bug #6514594
extern "C" int madvise(caddr_t, size_t, int); extern "C" int madvise(caddr_t, size_t, int);
extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
int attr, int mask); int attr, int mask);
#endif //_GNU_SOURCE #endif //_GNU_SOURCE
/* /*
...@@ -215,8 +216,9 @@ struct memcntl_mha { ...@@ -215,8 +216,9 @@ struct memcntl_mha {
#define MaximumPriority 127 #define MaximumPriority 127
// Values for ThreadPriorityPolicy == 1 // Values for ThreadPriorityPolicy == 1
int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64, int prio_policy1[CriticalPriority+1] = {
80, 96, 112, 124, 127 }; -99999, 0, 16, 32, 48, 64,
80, 96, 112, 124, 127, 127 };
// System parameters used internally // System parameters used internally
static clock_t clock_tics_per_sec = 100; static clock_t clock_tics_per_sec = 100;
...@@ -1048,15 +1050,22 @@ extern "C" void* java_start(void* thread_addr) { ...@@ -1048,15 +1050,22 @@ extern "C" void* java_start(void* thread_addr) {
} }
// If the creator called set priority before we started, // If the creator called set priority before we started,
// we need to call set priority now that we have an lwp. // we need to call set_native_priority now that we have an lwp.
// Get the priority from libthread and set the priority // We used to get the priority from thr_getprio (we called
// for the new Solaris lwp. // thr_setprio way back in create_thread) and pass it to
// set_native_priority, but Solaris scales the priority
// in java_to_os_priority, so when we read it back here,
// we pass trash to set_native_priority instead of what's
// in java_to_os_priority. So we save the native priority
// in the osThread and recall it here.
if ( osthr->thread_id() != -1 ) { if ( osthr->thread_id() != -1 ) {
if ( UseThreadPriorities ) { if ( UseThreadPriorities ) {
thr_getprio(osthr->thread_id(), &prio); int prio = osthr->native_priority();
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n", tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
osthr->thread_id(), osthr->lwp_id(), prio ); INTPTR_FORMAT ", setting priority: %d\n",
osthr->thread_id(), osthr->lwp_id(), prio);
} }
os::set_native_priority(thread, prio); os::set_native_priority(thread, prio);
} }
...@@ -1353,13 +1362,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { ...@@ -1353,13 +1362,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
// Remember that we created this thread so we can set priority on it // Remember that we created this thread so we can set priority on it
osthread->set_vm_created(); osthread->set_vm_created();
// Set the default thread priority otherwise use NormalPriority // Set the default thread priority. If using bound threads, setting
// lwp priority will be delayed until thread start.
if ( UseThreadPriorities ) { set_native_priority(thread,
thr_setprio(tid, (DefaultThreadPriority == -1) ? DefaultThreadPriority == -1 ?
java_to_os_priority[NormPriority] : java_to_os_priority[NormPriority] :
DefaultThreadPriority); DefaultThreadPriority);
}
// Initial thread state is INITIALIZED, not SUSPENDED // Initial thread state is INITIALIZED, not SUSPENDED
osthread->set_state(INITIALIZED); osthread->set_state(INITIALIZED);
...@@ -3728,7 +3736,7 @@ typedef struct { ...@@ -3728,7 +3736,7 @@ typedef struct {
} SchedInfo; } SchedInfo;
static SchedInfo tsLimits, iaLimits, rtLimits; static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
#ifdef ASSERT #ifdef ASSERT
static int ReadBackValidate = 1; static int ReadBackValidate = 1;
...@@ -3739,6 +3747,8 @@ static int myMax = 0; ...@@ -3739,6 +3747,8 @@ static int myMax = 0;
static int myCur = 0; static int myCur = 0;
static bool priocntl_enable = false; static bool priocntl_enable = false;
static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
// Call the version of priocntl suitable for all supported versions // Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can // of Solaris. We need to call through this wrapper so that we can
...@@ -3783,19 +3793,27 @@ int lwp_priocntl_init () ...@@ -3783,19 +3793,27 @@ int lwp_priocntl_init ()
if (os::Solaris::T2_libthread() || UseBoundThreads) { if (os::Solaris::T2_libthread() || UseBoundThreads) {
// If ThreadPriorityPolicy is 1, switch tables // If ThreadPriorityPolicy is 1, switch tables
if (ThreadPriorityPolicy == 1) { if (ThreadPriorityPolicy == 1) {
for (i = 0 ; i < MaxPriority+1; i++) for (i = 0 ; i < CriticalPriority+1; i++)
os::java_to_os_priority[i] = prio_policy1[i]; os::java_to_os_priority[i] = prio_policy1[i];
} }
if (UseCriticalJavaThreadPriority) {
// MaxPriority always maps to the FX scheduling class and criticalPrio.
// See set_native_priority() and set_lwp_class_and_priority().
// Save original MaxPriority mapping in case attempt to
// use critical priority fails.
java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
// Set negative to distinguish from other priorities
os::java_to_os_priority[MaxPriority] = -criticalPrio;
}
} }
// Not using Bound Threads, set to ThreadPolicy 1 // Not using Bound Threads, set to ThreadPolicy 1
else { else {
for ( i = 0 ; i < MaxPriority+1; i++ ) { for ( i = 0 ; i < CriticalPriority+1; i++ ) {
os::java_to_os_priority[i] = prio_policy1[i]; os::java_to_os_priority[i] = prio_policy1[i];
} }
return 0; return 0;
} }
// Get IDs for a set of well-known scheduling classes. // Get IDs for a set of well-known scheduling classes.
// TODO-FIXME: GETCLINFO returns the current # of classes in the // TODO-FIXME: GETCLINFO returns the current # of classes in the
// the system. We should have a loop that iterates over the // the system. We should have a loop that iterates over the
...@@ -3828,24 +3846,33 @@ int lwp_priocntl_init () ...@@ -3828,24 +3846,33 @@ int lwp_priocntl_init ()
rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
rtLimits.minPrio = 0; rtLimits.minPrio = 0;
strcpy(ClassInfo.pc_clname, "FX");
ClassInfo.pc_cid = -1;
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
fxLimits.schedPolicy = ClassInfo.pc_cid;
fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
fxLimits.minPrio = 0;
// Query our "current" scheduling class. // Query our "current" scheduling class.
// This will normally be IA,TS or, rarely, RT. // This will normally be IA, TS or, rarely, FX or RT.
memset (&ParmInfo, 0, sizeof(ParmInfo)); memset(&ParmInfo, 0, sizeof(ParmInfo));
ParmInfo.pc_cid = PC_CLNULL; ParmInfo.pc_cid = PC_CLNULL;
rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo ); rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
if ( rslt < 0 ) return errno; if (rslt < 0) return errno;
myClass = ParmInfo.pc_cid; myClass = ParmInfo.pc_cid;
// We now know our scheduling classId, get specific information // We now know our scheduling classId, get specific information
// the class. // about the class.
ClassInfo.pc_cid = myClass; ClassInfo.pc_cid = myClass;
ClassInfo.pc_clname[0] = 0; ClassInfo.pc_clname[0] = 0;
rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo ); rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
if ( rslt < 0 ) return errno; if (rslt < 0) return errno;
if (ThreadPriorityVerbose) if (ThreadPriorityVerbose) {
tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
}
memset(&ParmInfo, 0, sizeof(pcparms_t)); memset(&ParmInfo, 0, sizeof(pcparms_t));
ParmInfo.pc_cid = PC_CLNULL; ParmInfo.pc_cid = PC_CLNULL;
...@@ -3865,6 +3892,11 @@ int lwp_priocntl_init () ...@@ -3865,6 +3892,11 @@ int lwp_priocntl_init ()
myMin = tsLimits.minPrio; myMin = tsLimits.minPrio;
myMax = tsLimits.maxPrio; myMax = tsLimits.maxPrio;
myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
} else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
myMin = fxLimits.minPrio;
myMax = fxLimits.maxPrio;
myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict
} else { } else {
// No clue - punt // No clue - punt
if (ThreadPriorityVerbose) if (ThreadPriorityVerbose)
...@@ -3872,8 +3904,9 @@ int lwp_priocntl_init () ...@@ -3872,8 +3904,9 @@ int lwp_priocntl_init ()
return EINVAL; // no clue, punt return EINVAL; // no clue, punt
} }
if (ThreadPriorityVerbose) if (ThreadPriorityVerbose) {
tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax); tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
}
priocntl_enable = true; // Enable changing priorities priocntl_enable = true; // Enable changing priorities
return 0; return 0;
...@@ -3882,6 +3915,7 @@ int lwp_priocntl_init () ...@@ -3882,6 +3915,7 @@ int lwp_priocntl_init ()
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms)) #define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms)) #define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms)) #define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x) ((fxparms_t *)((x).pc_clparms))
// scale_to_lwp_priority // scale_to_lwp_priority
...@@ -3900,13 +3934,13 @@ int scale_to_lwp_priority (int rMin, int rMax, int x) ...@@ -3900,13 +3934,13 @@ int scale_to_lwp_priority (int rMin, int rMax, int x)
} }
// set_lwp_priority // set_lwp_class_and_priority
// //
// Set the priority of the lwp. This call should only be made // Set the class and priority of the lwp. This call should only
// when using bound threads (T2 threads are bound by default). // be made when using bound threads (T2 threads are bound by default).
// //
int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) int set_lwp_class_and_priority(int ThreadID, int lwpid,
{ int newPrio, int new_class, bool scale) {
int rslt; int rslt;
int Actual, Expected, prv; int Actual, Expected, prv;
pcparms_t ParmInfo; // for GET-SET pcparms_t ParmInfo; // for GET-SET
...@@ -3927,19 +3961,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) ...@@ -3927,19 +3961,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
return EINVAL; return EINVAL;
} }
// If lwp hasn't started yet, just return // If lwp hasn't started yet, just return
// the _start routine will call us again. // the _start routine will call us again.
if ( lwpid <= 0 ) { if ( lwpid <= 0 ) {
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set", tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
INTPTR_FORMAT " to %d, lwpid not set",
ThreadID, newPrio); ThreadID, newPrio);
} }
return 0; return 0;
} }
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ", tty->print_cr ("set_lwp_class_and_priority("
INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
ThreadID, lwpid, newPrio); ThreadID, lwpid, newPrio);
} }
...@@ -3948,40 +3983,70 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) ...@@ -3948,40 +3983,70 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
if (rslt < 0) return errno; if (rslt < 0) return errno;
if (ParmInfo.pc_cid == rtLimits.schedPolicy) { int cur_class = ParmInfo.pc_cid;
ParmInfo.pc_cid = (id_t)new_class;
if (new_class == rtLimits.schedPolicy) {
rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms; rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio); rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio,
rtLimits.maxPrio, newPrio)
: newPrio;
rtInfo->rt_tqsecs = RT_NOCHANGE; rtInfo->rt_tqsecs = RT_NOCHANGE;
rtInfo->rt_tqnsecs = RT_NOCHANGE; rtInfo->rt_tqnsecs = RT_NOCHANGE;
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri); tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
} }
} else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { } else if (new_class == iaLimits.schedPolicy) {
iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim); int maxClamped = MIN2(iaLimits.maxPrio,
iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio); cur_class == new_class
iaInfo->ia_uprilim = IA_NOCHANGE; ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio,
maxClamped, newPrio)
: newPrio;
iaInfo->ia_uprilim = cur_class == new_class
? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
iaInfo->ia_mode = IA_NOCHANGE; iaInfo->ia_mode = IA_NOCHANGE;
iaInfo->ia_nice = cur_class == new_class ? IA_NOCHANGE : NZERO;
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr ("IA: [%d...%d] %d->%d\n", tty->print_cr("IA: [%d...%d] %d->%d\n",
iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
} }
} else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { } else if (new_class == tsLimits.schedPolicy) {
tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim); int maxClamped = MIN2(tsLimits.maxPrio,
prv = tsInfo->ts_upri; cur_class == new_class
tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio); ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
tsInfo->ts_uprilim = IA_NOCHANGE; tsInfo->ts_upri = scale ? scale_to_lwp_priority(tsLimits.minPrio,
maxClamped, newPrio)
: newPrio;
tsInfo->ts_uprilim = cur_class == new_class
? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
if (ThreadPriorityVerbose) { if (ThreadPriorityVerbose) {
tty->print_cr ("TS: %d [%d...%d] %d->%d\n", tty->print_cr("TS: [%d...%d] %d->%d\n",
prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri); tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
}
} else if (new_class == fxLimits.schedPolicy) {
fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
int maxClamped = MIN2(fxLimits.maxPrio,
cur_class == new_class
? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio,
maxClamped, newPrio)
: newPrio;
fxInfo->fx_uprilim = cur_class == new_class
? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
fxInfo->fx_tqsecs = FX_NOCHANGE;
fxInfo->fx_tqnsecs = FX_NOCHANGE;
if (ThreadPriorityVerbose) {
tty->print_cr("FX: [%d...%d] %d->%d\n",
fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
} }
if (prv == tsInfo->ts_upri) return 0;
} else { } else {
if ( ThreadPriorityVerbose ) { if (ThreadPriorityVerbose) {
tty->print_cr ("Unknown scheduling class\n"); tty->print_cr("Unknown new scheduling class %d\n", new_class);
} }
return EINVAL; // no clue, punt return EINVAL; // no clue, punt
} }
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
...@@ -4016,16 +4081,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) ...@@ -4016,16 +4081,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
} else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
Actual = TSPRI(ReadBack)->ts_upri; Actual = TSPRI(ReadBack)->ts_upri;
Expected = TSPRI(ParmInfo)->ts_upri; Expected = TSPRI(ParmInfo)->ts_upri;
} else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
Actual = FXPRI(ReadBack)->fx_upri;
Expected = FXPRI(ParmInfo)->fx_upri;
} else { } else {
if ( ThreadPriorityVerbose ) { if (ThreadPriorityVerbose) {
tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid); tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
ParmInfo.pc_cid);
} }
} }
if (Actual != Expected) { if (Actual != Expected) {
if ( ThreadPriorityVerbose ) { if (ThreadPriorityVerbose) {
tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n", tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
lwpid, newPrio, ReadBack.pc_cid, Actual, Expected); lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
} }
} }
#endif #endif
...@@ -4033,8 +4102,6 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) ...@@ -4033,8 +4102,6 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
return 0; return 0;
} }
// Solaris only gives access to 128 real priorities at a time, // Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range. This would be better // so we expand Java's ten to fill this range. This would be better
// if we dynamically adjusted relative priorities. // if we dynamically adjusted relative priorities.
...@@ -4055,8 +4122,7 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) ...@@ -4055,8 +4122,7 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
// which do not explicitly alter their thread priorities. // which do not explicitly alter their thread priorities.
// //
int os::java_to_os_priority[CriticalPriority + 1] = {
int os::java_to_os_priority[MaxPriority + 1] = {
-99999, // 0 Entry should never be used -99999, // 0 Entry should never be used
0, // 1 MinPriority 0, // 1 MinPriority
...@@ -4071,17 +4137,51 @@ int os::java_to_os_priority[MaxPriority + 1] = { ...@@ -4071,17 +4137,51 @@ int os::java_to_os_priority[MaxPriority + 1] = {
127, // 8 127, // 8
127, // 9 NearMaxPriority 127, // 9 NearMaxPriority
127 // 10 MaxPriority 127, // 10 MaxPriority
};
-criticalPrio // 11 CriticalPriority
};
OSReturn os::set_native_priority(Thread* thread, int newpri) { OSReturn os::set_native_priority(Thread* thread, int newpri) {
OSThread* osthread = thread->osthread();
// Save requested priority in case the thread hasn't been started
osthread->set_native_priority(newpri);
// Check for critical priority request
bool fxcritical = false;
if (newpri == -criticalPrio) {
fxcritical = true;
newpri = criticalPrio;
}
assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping"); assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
if ( !UseThreadPriorities ) return OS_OK; if (!UseThreadPriorities) return OS_OK;
int status = thr_setprio(thread->osthread()->thread_id(), newpri);
if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) ) int status = 0;
status |= (set_lwp_priority (thread->osthread()->thread_id(),
thread->osthread()->lwp_id(), newpri )); if (!fxcritical) {
// Use thr_setprio only if we have a priority that thr_setprio understands
status = thr_setprio(thread->osthread()->thread_id(), newpri);
}
if (os::Solaris::T2_libthread() ||
(UseBoundThreads && osthread->is_vm_created())) {
int lwp_status =
set_lwp_class_and_priority(osthread->thread_id(),
osthread->lwp_id(),
newpri,
fxcritical ? fxLimits.schedPolicy : myClass,
!fxcritical);
if (lwp_status != 0 && fxcritical) {
// Try again, this time without changing the scheduling class
newpri = java_MaxPriority_to_os_priority;
lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
osthread->lwp_id(),
newpri, myClass, false);
}
status |= lwp_status;
}
return (status == 0) ? OS_OK : OS_ERR; return (status == 0) ? OS_OK : OS_ERR;
} }
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -3296,7 +3296,7 @@ void os::yield_all(int attempts) { ...@@ -3296,7 +3296,7 @@ void os::yield_all(int attempts) {
// so we compress Java's ten down to seven. It would be better // so we compress Java's ten down to seven. It would be better
// if we dynamically adjusted relative priorities. // if we dynamically adjusted relative priorities.
int os::java_to_os_priority[MaxPriority + 1] = { int os::java_to_os_priority[CriticalPriority + 1] = {
THREAD_PRIORITY_IDLE, // 0 Entry should never be used THREAD_PRIORITY_IDLE, // 0 Entry should never be used
THREAD_PRIORITY_LOWEST, // 1 MinPriority THREAD_PRIORITY_LOWEST, // 1 MinPriority
THREAD_PRIORITY_LOWEST, // 2 THREAD_PRIORITY_LOWEST, // 2
...@@ -3307,10 +3307,11 @@ int os::java_to_os_priority[MaxPriority + 1] = { ...@@ -3307,10 +3307,11 @@ int os::java_to_os_priority[MaxPriority + 1] = {
THREAD_PRIORITY_ABOVE_NORMAL, // 7 THREAD_PRIORITY_ABOVE_NORMAL, // 7
THREAD_PRIORITY_ABOVE_NORMAL, // 8 THREAD_PRIORITY_ABOVE_NORMAL, // 8
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
THREAD_PRIORITY_HIGHEST // 10 MaxPriority THREAD_PRIORITY_HIGHEST, // 10 MaxPriority
THREAD_PRIORITY_HIGHEST // 11 CriticalPriority
}; };
int prio_policy1[MaxPriority + 1] = { int prio_policy1[CriticalPriority + 1] = {
THREAD_PRIORITY_IDLE, // 0 Entry should never be used THREAD_PRIORITY_IDLE, // 0 Entry should never be used
THREAD_PRIORITY_LOWEST, // 1 MinPriority THREAD_PRIORITY_LOWEST, // 1 MinPriority
THREAD_PRIORITY_LOWEST, // 2 THREAD_PRIORITY_LOWEST, // 2
...@@ -3321,17 +3322,21 @@ int prio_policy1[MaxPriority + 1] = { ...@@ -3321,17 +3322,21 @@ int prio_policy1[MaxPriority + 1] = {
THREAD_PRIORITY_ABOVE_NORMAL, // 7 THREAD_PRIORITY_ABOVE_NORMAL, // 7
THREAD_PRIORITY_HIGHEST, // 8 THREAD_PRIORITY_HIGHEST, // 8
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
THREAD_PRIORITY_TIME_CRITICAL // 10 MaxPriority THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority
}; };
static int prio_init() { static int prio_init() {
// If ThreadPriorityPolicy is 1, switch tables // If ThreadPriorityPolicy is 1, switch tables
if (ThreadPriorityPolicy == 1) { if (ThreadPriorityPolicy == 1) {
int i; int i;
for (i = 0; i < MaxPriority + 1; i++) { for (i = 0; i < CriticalPriority + 1; i++) {
os::java_to_os_priority[i] = prio_policy1[i]; os::java_to_os_priority[i] = prio_policy1[i];
} }
} }
if (UseCriticalJavaThreadPriority) {
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
}
return 0; return 0;
} }
......
...@@ -1354,9 +1354,10 @@ class LIR_OpBranch: public LIR_Op { ...@@ -1354,9 +1354,10 @@ class LIR_OpBranch: public LIR_Op {
CodeStub* _stub; // if this is a branch to a stub, this is the stub CodeStub* _stub; // if this is a branch to a stub, this is the stub
public: public:
LIR_OpBranch(LIR_Condition cond, Label* lbl) LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
: LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
, _cond(cond) , _cond(cond)
, _type(type)
, _label(lbl) , _label(lbl)
, _block(NULL) , _block(NULL)
, _ublock(NULL) , _ublock(NULL)
...@@ -2053,7 +2054,7 @@ class LIR_List: public CompilationResourceObj { ...@@ -2053,7 +2054,7 @@ class LIR_List: public CompilationResourceObj {
void jump(CodeStub* stub) { void jump(CodeStub* stub) {
append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub)); append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
} }
void branch(LIR_Condition cond, Label* lbl) { append(new LIR_OpBranch(cond, lbl)); } void branch(LIR_Condition cond, BasicType type, Label* lbl) { append(new LIR_OpBranch(cond, type, lbl)); }
void branch(LIR_Condition cond, BasicType type, BlockBegin* block) { void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons"); assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
append(new LIR_OpBranch(cond, type, block)); append(new LIR_OpBranch(cond, type, block));
......
...@@ -2350,7 +2350,7 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi ...@@ -2350,7 +2350,7 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi
} else { } else {
LabelObj* L = new LabelObj(); LabelObj* L = new LabelObj();
__ cmp(lir_cond_less, value, low_key); __ cmp(lir_cond_less, value, low_key);
__ branch(lir_cond_less, L->label()); __ branch(lir_cond_less, T_INT, L->label());
__ cmp(lir_cond_lessEqual, value, high_key); __ cmp(lir_cond_lessEqual, value, high_key);
__ branch(lir_cond_lessEqual, T_INT, dest); __ branch(lir_cond_lessEqual, T_INT, dest);
__ branch_destination(L->label()); __ branch_destination(L->label());
......
...@@ -413,8 +413,9 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth ...@@ -413,8 +413,9 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
} }
bci = branch_bci + offset; bci = branch_bci + offset;
} }
assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD); osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
return osr_nm; return osr_nm;
} }
......
...@@ -1347,7 +1347,13 @@ class BacktraceBuilder: public StackObj { ...@@ -1347,7 +1347,13 @@ class BacktraceBuilder: public StackObj {
return _backtrace(); return _backtrace();
} }
inline void push(methodOop method, short bci, TRAPS) { inline void push(methodOop method, int bci, TRAPS) {
// Smear the -1 bci to 0 since the array only holds unsigned
// shorts. The later line number lookup would just smear the -1
// to a 0 even if it could be recorded.
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");
if (_index >= trace_chunk_size) { if (_index >= trace_chunk_size) {
methodHandle mhandle(THREAD, method); methodHandle mhandle(THREAD, method);
expand(CHECK); expand(CHECK);
...@@ -1574,8 +1580,13 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t ...@@ -1574,8 +1580,13 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
int chunk_count = 0; int chunk_count = 0;
for (;!st.at_end(); st.next()) { for (;!st.at_end(); st.next()) {
// add element // Add entry and smear the -1 bci to 0 since the array only holds
bcis->ushort_at_put(chunk_count, st.bci()); // unsigned shorts. The later line number lookup would just smear
// the -1 to a 0 even if it could be recorded.
int bci = st.bci();
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");
bcis->ushort_at_put(chunk_count, bci);
methods->obj_at_put(chunk_count, st.method()); methods->obj_at_put(chunk_count, st.method());
chunk_count++; chunk_count++;
......
/* /*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -855,23 +855,23 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue ...@@ -855,23 +855,23 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// Note that this only sets the JavaThread _priority field, which by // Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities. // definition is limited to Java priorities and not OS priorities.
// The os-priority is set in the CompilerThread startup code itself // The os-priority is set in the CompilerThread startup code itself
java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
// CLEANUP PRIORITIES: This -if- statement hids a bug whereby the compiler
// threads never have their OS priority set. The assumption here is to // Note that we cannot call os::set_priority because it expects Java
// enable the Performance group to do flag tuning, figure out a suitable // priorities and we are *explicitly* using OS priorities so that it's
// CompilerThreadPriority, and then remove this 'if' statement (and // possible to set the compiler thread priority higher than any Java
// comment) and unconditionally set the priority. // thread.
// Compiler Threads should be at the highest Priority int native_prio = CompilerThreadPriority;
if ( CompilerThreadPriority != -1 ) if (native_prio == -1) {
os::set_native_priority( compiler_thread, CompilerThreadPriority ); if (UseCriticalCompilerThreadPriority) {
else native_prio = os::java_to_os_priority[CriticalPriority];
os::set_native_priority( compiler_thread, os::java_to_os_priority[NearMaxPriority]); } else {
native_prio = os::java_to_os_priority[NearMaxPriority];
// Note that I cannot call os::set_priority because it expects Java }
// priorities and I am *explicitly* using OS priorities so that it's }
// possible to set the compiler thread priority higher than any Java os::set_native_priority(compiler_thread, native_prio);
// thread.
java_lang_Thread::set_daemon(thread_oop()); java_lang_Thread::set_daemon(thread_oop());
...@@ -879,6 +879,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue ...@@ -879,6 +879,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
Threads::add(compiler_thread); Threads::add(compiler_thread);
Thread::start(compiler_thread); Thread::start(compiler_thread);
} }
// Let go of Threads_lock before yielding // Let go of Threads_lock before yielding
os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS) os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
...@@ -961,7 +962,7 @@ void CompileBroker::compile_method_base(methodHandle method, ...@@ -961,7 +962,7 @@ void CompileBroker::compile_method_base(methodHandle method,
methodHandle hot_method, methodHandle hot_method,
int hot_count, int hot_count,
const char* comment, const char* comment,
TRAPS) { Thread* thread) {
// do nothing if compiler thread(s) is not available // do nothing if compiler thread(s) is not available
if (!_initialized ) { if (!_initialized ) {
return; return;
...@@ -1037,7 +1038,7 @@ void CompileBroker::compile_method_base(methodHandle method, ...@@ -1037,7 +1038,7 @@ void CompileBroker::compile_method_base(methodHandle method,
// Acquire our lock. // Acquire our lock.
{ {
MutexLocker locker(queue->lock(), THREAD); MutexLocker locker(queue->lock(), thread);
// Make sure the method has not slipped into the queues since // Make sure the method has not slipped into the queues since
// last we checked; note that those checks were "fast bail-outs". // last we checked; note that those checks were "fast bail-outs".
...@@ -1119,7 +1120,7 @@ void CompileBroker::compile_method_base(methodHandle method, ...@@ -1119,7 +1120,7 @@ void CompileBroker::compile_method_base(methodHandle method,
nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
int comp_level, int comp_level,
methodHandle hot_method, int hot_count, methodHandle hot_method, int hot_count,
const char* comment, TRAPS) { const char* comment, Thread* THREAD) {
// make sure arguments make sense // make sure arguments make sense
assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method"); assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method");
assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
...@@ -1173,10 +1174,10 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, ...@@ -1173,10 +1174,10 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
// some prerequisites that are compiler specific // some prerequisites that are compiler specific
if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) { if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
method->constants()->resolve_string_constants(CHECK_0); method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
// Resolve all classes seen in the signature of the method // Resolve all classes seen in the signature of the method
// we are compiling. // we are compiling.
methodOopDesc::load_signature_classes(method, CHECK_0); methodOopDesc::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
} }
// If the method is native, do the lookup in the thread requesting // If the method is native, do the lookup in the thread requesting
...@@ -1230,7 +1231,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, ...@@ -1230,7 +1231,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return NULL; return NULL;
} }
} else { } else {
compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, CHECK_0); compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
} }
// return requested nmethod // return requested nmethod
......
...@@ -333,7 +333,7 @@ class CompileBroker: AllStatic { ...@@ -333,7 +333,7 @@ class CompileBroker: AllStatic {
methodHandle hot_method, methodHandle hot_method,
int hot_count, int hot_count,
const char* comment, const char* comment,
TRAPS); Thread* thread);
static CompileQueue* compile_queue(int comp_level) { static CompileQueue* compile_queue(int comp_level) {
if (is_c2_compile(comp_level)) return _c2_method_queue; if (is_c2_compile(comp_level)) return _c2_method_queue;
if (is_c1_compile(comp_level)) return _c1_method_queue; if (is_c1_compile(comp_level)) return _c1_method_queue;
...@@ -363,7 +363,7 @@ class CompileBroker: AllStatic { ...@@ -363,7 +363,7 @@ class CompileBroker: AllStatic {
int comp_level, int comp_level,
methodHandle hot_method, methodHandle hot_method,
int hot_count, int hot_count,
const char* comment, TRAPS); const char* comment, Thread* thread);
static void compiler_thread_loop(); static void compiler_thread_loop();
......
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -75,10 +75,25 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector) ...@@ -75,10 +75,25 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
set_name("Concurrent Mark-Sweep GC Thread"); set_name("Concurrent Mark-Sweep GC Thread");
if (os::create_thread(this, os::cgc_thread)) { if (os::create_thread(this, os::cgc_thread)) {
// XXX: need to set this to low priority // An old comment here said: "Priority should be just less
// unless "agressive mode" set; priority // than that of VMThread". Since the VMThread runs at
// should be just less than that of VMThread. // NearMaxPriority, the old comment was inaccurate, but
os::set_priority(this, NearMaxPriority); // changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
int native_prio;
if (UseCriticalCMSThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
os::set_native_priority(this, native_prio);
if (!DisableStartThread) { if (!DisableStartThread) {
os::start_thread(this); os::start_thread(this);
} }
......
...@@ -84,8 +84,8 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC { ...@@ -84,8 +84,8 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
} }
// iteration // iteration
bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); } inline bool iterate(BitMapClosure* cl, MemRegion mr);
bool iterate(BitMapClosure* cl, MemRegion mr); inline bool iterate(BitMapClosure* cl);
// Return the address corresponding to the next marked bit at or after // Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no // "addr", and before "limit", if "limit" is non-NULL. If there is no
...@@ -349,10 +349,62 @@ typedef enum { ...@@ -349,10 +349,62 @@ typedef enum {
high_verbose // per object verbose high_verbose // per object verbose
} CMVerboseLevel; } CMVerboseLevel;
class YoungList;
// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
YoungList* _young_list;
ConcurrentMark* _cm;
volatile bool _scan_in_progress;
volatile bool _should_abort;
HeapRegion* volatile _next_survivor;
public:
CMRootRegions();
// We actually do most of the initialization in this method.
void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
// Reset the claiming / scanning of the root regions.
void prepare_for_scan();
// Forces get_next() to return NULL so that the iteration aborts early.
void abort() { _should_abort = true; }
// Return true if the CM thread are actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }
// Claim the next root region to scan atomically, or return NULL if
// all have been claimed.
HeapRegion* claim_next();
// Flag that we're done with root region scanning and notify anyone
// who's waiting on it. If aborted is false, assume that all regions
// have been claimed.
void scan_finished();
// If CM threads are still scanning root regions, wait until they
// are done. Return true if we had to wait, false otherwise.
bool wait_until_scan_finished();
};
class ConcurrentMarkThread; class ConcurrentMarkThread;
class ConcurrentMark: public CHeapObj { class ConcurrentMark : public CHeapObj {
friend class ConcurrentMarkThread; friend class ConcurrentMarkThread;
friend class CMTask; friend class CMTask;
friend class CMBitMapClosure; friend class CMBitMapClosure;
...@@ -386,7 +438,7 @@ protected: ...@@ -386,7 +438,7 @@ protected:
FreeRegionList _cleanup_list; FreeRegionList _cleanup_list;
// CMS marking support structures // Concurrent marking support structures
CMBitMap _markBitMap1; CMBitMap _markBitMap1;
CMBitMap _markBitMap2; CMBitMap _markBitMap2;
CMBitMapRO* _prevMarkBitMap; // completed mark bitmap CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
...@@ -400,6 +452,9 @@ protected: ...@@ -400,6 +452,9 @@ protected:
HeapWord* _heap_start; HeapWord* _heap_start;
HeapWord* _heap_end; HeapWord* _heap_end;
// Root region tracking and claiming.
CMRootRegions _root_regions;
// For gray objects // For gray objects
CMMarkStack _markStack; // Grey objects behind global finger. CMMarkStack _markStack; // Grey objects behind global finger.
CMRegionStack _regionStack; // Grey regions behind global finger. CMRegionStack _regionStack; // Grey regions behind global finger.
...@@ -426,7 +481,6 @@ protected: ...@@ -426,7 +481,6 @@ protected:
WorkGangBarrierSync _first_overflow_barrier_sync; WorkGangBarrierSync _first_overflow_barrier_sync;
WorkGangBarrierSync _second_overflow_barrier_sync; WorkGangBarrierSync _second_overflow_barrier_sync;
// this is set by any task, when an overflow on the global data // this is set by any task, when an overflow on the global data
// structures is detected. // structures is detected.
volatile bool _has_overflown; volatile bool _has_overflown;
...@@ -554,9 +608,9 @@ protected: ...@@ -554,9 +608,9 @@ protected:
bool has_overflown() { return _has_overflown; } bool has_overflown() { return _has_overflown; }
void set_has_overflown() { _has_overflown = true; } void set_has_overflown() { _has_overflown = true; }
void clear_has_overflown() { _has_overflown = false; } void clear_has_overflown() { _has_overflown = false; }
bool restart_for_overflow() { return _restart_for_overflow; }
bool has_aborted() { return _has_aborted; } bool has_aborted() { return _has_aborted; }
bool restart_for_overflow() { return _restart_for_overflow; }
// Methods to enter the two overflow sync barriers // Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(int task_num); void enter_first_sync_barrier(int task_num);
...@@ -578,6 +632,27 @@ protected: ...@@ -578,6 +632,27 @@ protected:
} }
} }
// Live Data Counting data structures...
// These data structures are initialized at the start of
// marking. They are written to while marking is active.
// They are aggregated during remark; the aggregated values
// are then used to populate the _region_bm, _card_bm, and
// the total live bytes, which are then subsequently updated
// during cleanup.
// An array of bitmaps (one bit map per task). Each bitmap
// is used to record the cards spanned by the live objects
// marked by that task/worker.
BitMap* _count_card_bitmaps;
// Used to record the number of marked live bytes
// (for each region, by worker thread).
size_t** _count_marked_bytes;
// Card index of the bottom of the G1 heap. Used for biasing indices into
// the card bitmaps.
intptr_t _heap_bottom_card_num;
public: public:
// Manipulation of the global mark stack. // Manipulation of the global mark stack.
// Notice that the first mark_stack_push is CAS-based, whereas the // Notice that the first mark_stack_push is CAS-based, whereas the
...@@ -671,6 +746,8 @@ public: ...@@ -671,6 +746,8 @@ public:
// Returns true if there are any aborted memory regions. // Returns true if there are any aborted memory regions.
bool has_aborted_regions(); bool has_aborted_regions();
CMRootRegions* root_regions() { return &_root_regions; }
bool concurrent_marking_in_progress() { bool concurrent_marking_in_progress() {
return _concurrent_marking_in_progress; return _concurrent_marking_in_progress;
} }
...@@ -703,6 +780,7 @@ public: ...@@ -703,6 +780,7 @@ public:
ConcurrentMark(ReservedSpace rs, int max_regions); ConcurrentMark(ReservedSpace rs, int max_regions);
~ConcurrentMark(); ~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; } ConcurrentMarkThread* cmThread() { return _cmThread; }
CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; } CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
...@@ -720,8 +798,17 @@ public: ...@@ -720,8 +798,17 @@ public:
// G1CollectedHeap // G1CollectedHeap
// This notifies CM that a root during initial-mark needs to be // This notifies CM that a root during initial-mark needs to be
// grayed. It is MT-safe. // grayed. It is MT-safe. word_size is the size of the object in
inline void grayRoot(oop obj, size_t word_size); // words. It is passed explicitly as sometimes we cannot calculate
// it from the given object because it might be in an inconsistent
// state (e.g., in to-space and being copied). So the caller is
// responsible for dealing with this issue (e.g., get the size from
// the from-space image when the to-space image might be
// inconsistent) and always passing the size. hr is the region that
// contains the object and it's passed optionally from callers who
// might already have it (no point in recalculating it).
inline void grayRoot(oop obj, size_t word_size,
uint worker_id, HeapRegion* hr = NULL);
// It's used during evacuation pauses to gray a region, if // It's used during evacuation pauses to gray a region, if
// necessary, and it's MT-safe. It assumes that the caller has // necessary, and it's MT-safe. It assumes that the caller has
...@@ -772,6 +859,13 @@ public: ...@@ -772,6 +859,13 @@ public:
void checkpointRootsInitialPre(); void checkpointRootsInitialPre();
void checkpointRootsInitialPost(); void checkpointRootsInitialPost();
// Scan all the root regions and mark everything reachable from
// them.
void scanRootRegions();
// Scan a single root region and mark everything reachable from it.
void scanRootRegion(HeapRegion* hr, uint worker_id);
// Do concurrent phase of marking, to a tentative transitive closure. // Do concurrent phase of marking, to a tentative transitive closure.
void markFromRoots(); void markFromRoots();
...@@ -781,15 +875,13 @@ public: ...@@ -781,15 +875,13 @@ public:
void checkpointRootsFinal(bool clear_all_soft_refs); void checkpointRootsFinal(bool clear_all_soft_refs);
void checkpointRootsFinalWork(); void checkpointRootsFinalWork();
void calcDesiredRegions();
void cleanup(); void cleanup();
void completeCleanup(); void completeCleanup();
// Mark in the previous bitmap. NB: this is usually read-only, so use // Mark in the previous bitmap. NB: this is usually read-only, so use
// this carefully! // this carefully!
inline void markPrev(oop p); inline void markPrev(oop p);
inline void markNext(oop p);
void clear(oop p);
// Clears marks for all objects in the given range, for the prev, // Clears marks for all objects in the given range, for the prev,
// next, or both bitmaps. NB: the previous bitmap is usually // next, or both bitmaps. NB: the previous bitmap is usually
// read-only, so use this carefully! // read-only, so use this carefully!
...@@ -913,6 +1005,114 @@ public: ...@@ -913,6 +1005,114 @@ public:
bool verbose_high() { bool verbose_high() {
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
} }
// Counting data structure accessors
// Returns the card number of the bottom of the G1 heap.
// Used in biasing indices into accounting card bitmaps.
intptr_t heap_bottom_card_num() const {
return _heap_bottom_card_num;
}
// Returns the card bitmap for a given task or worker id.
BitMap* count_card_bitmap_for(uint worker_id) {
assert(0 <= worker_id && worker_id < _max_task_num, "oob");
assert(_count_card_bitmaps != NULL, "uninitialized");
BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
return task_card_bm;
}
// Returns the array containing the marked bytes for each region,
// for the given worker or task id.
size_t* count_marked_bytes_array_for(uint worker_id) {
assert(0 <= worker_id && worker_id < _max_task_num, "oob");
assert(_count_marked_bytes != NULL, "uninitialized");
size_t* marked_bytes_array = _count_marked_bytes[worker_id];
assert(marked_bytes_array != NULL, "uninitialized");
return marked_bytes_array;
}
// Returns the index in the liveness accounting card table bitmap
// for the given address
inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
// Counts the size of the given memory region in the the given
// marked_bytes array slot for the given HeapRegion.
// Sets the bits in the given card bitmap that are associated with the
// cards that are spanned by the memory region.
inline void count_region(MemRegion mr, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void count_region(MemRegion mr, uint worker_id);
// Counts the given object in the given task/worker counting
// data structures.
inline void count_object(oop obj, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool par_mark_and_count(oop obj, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool par_mark_and_count(oop obj, size_t word_size,
HeapRegion* hr, uint worker_id);
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
// Similar to the above routine but we don't know the heap region that
// contains the object to be marked/counted, which this routine looks up.
inline bool par_mark_and_count(oop obj, uint worker_id);
// Similar to the above routine but there are times when we cannot
// safely calculate the size of obj due to races and we, therefore,
// pass the size in as a parameter. It is the caller's reponsibility
// to ensure that the size passed in for obj is valid.
inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
// Unconditionally mark the given object, and unconditinally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj, HeapRegion* hr);
// Similar to the above routine but we don't know the heap region that
// contains the object to be marked/counted, which this routine looks up.
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj);
protected:
// Clear all the per-task bitmaps and arrays used to store the
// counting data.
void clear_all_count_data();
// Aggregates the counting data for each worker/task
// that was constructed while marking. Also sets
// the amount of marked bytes for each region and
// the top at concurrent mark count.
void aggregate_count_data();
// Verification routine
void verify_count_data();
}; };
// A class representing a marking task. // A class representing a marking task.
...@@ -1031,6 +1231,12 @@ private: ...@@ -1031,6 +1231,12 @@ private:
TruncatedSeq _marking_step_diffs_ms; TruncatedSeq _marking_step_diffs_ms;
// Counting data structures. Embedding the task's marked_bytes_array
// and card bitmap into the actual task saves having to go through
// the ConcurrentMark object.
size_t* _marked_bytes_array;
BitMap* _card_bm;
// LOTS of statistics related with this task // LOTS of statistics related with this task
#if _MARKING_STATS_ #if _MARKING_STATS_
NumberSeq _all_clock_intervals_ms; NumberSeq _all_clock_intervals_ms;
...@@ -1196,6 +1402,7 @@ public: ...@@ -1196,6 +1402,7 @@ public:
} }
CMTask(int task_num, ConcurrentMark *cm, CMTask(int task_num, ConcurrentMark *cm,
size_t* marked_bytes, BitMap* card_bm,
CMTaskQueue* task_queue, CMTaskQueueSet* task_queues); CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
// it prints statistics associated with this task // it prints statistics associated with this task
......
...@@ -28,6 +28,214 @@ ...@@ -28,6 +28,214 @@
#include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
// Maps a heap address to its index in the liveness accounting card
// bitmap. "Card num" means the address shifted right by the card
// shift (address 0 is card 0); the bitmap index is the card num of
// addr relative to the card num of the bottom of the heap.
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
  const intptr_t addr_card_num =
    intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  return addr_card_num - heap_bottom_card_num();
}
// Records the live bytes of the given memory region in the supplied
// task/worker counting structures: bumps the per-region marked-bytes
// total and sets the card bitmap bits for the cards mr spans.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  HeapWord* region_start = mr.start();
  HeapWord* region_last = mr.last();
  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(region_start), "sanity");
  assert(hr == g1h->heap_region_containing(region_last), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");
  // Credit the bytes of this region to the task-local total for hr.
  marked_bytes_array[hr->hrs_index()] += mr.byte_size();
  BitMap::idx_t start_idx = card_bitmap_index_for(region_start);
  BitMap::idx_t last_idx = card_bitmap_index_for(region_last);
  // The card bitmap is task/worker specific => no need to use 'par' routines.
  // Set bits in the inclusive bit range [start_idx, last_idx].
  //
  // For a span of at most 8 cards (i.e. objects/regions up to 4K) a
  // simple loop beats the generic set_range(); larger spans use
  // set_range() directly.
  if ((last_idx - start_idx) <= 8) {
    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
      task_card_bm->set_bit(i);
    }
  } else {
    assert(last_idx < task_card_bm->size(), "sanity");
    // Note: BitMap::set_range() takes an exclusive end offset.
    task_card_bm->set_range(start_idx, last_idx + 1);
  }
}
// Records the given memory region in the counting structures that
// belong to the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr,
                                         HeapRegion* hr,
                                         uint worker_id) {
  count_region(mr, hr,
               count_marked_bytes_array_for(worker_id),
               count_card_bitmap_for(worker_id));
}
// Records the given memory region (possibly a single object) in the
// counting structures for the given worker id, looking up the heap
// region that contains the start of mr.
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
  HeapRegion* hr = _g1h->heap_region_containing_raw(mr.start());
  count_region(mr, hr, worker_id);
}
// Counts the heap words occupied by obj in the given task/worker
// counting data structures.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  MemRegion obj_region((HeapWord*) obj, obj->size());
  count_region(obj_region, hr, marked_bytes_array, task_card_bm);
}
// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         uint worker_id) {
  // Look up the per-worker counting structures and delegate.
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  count_object(obj, hr, marked_bytes_array, task_card_bm);
}
// Tries to CAS-mark obj on the next marking bitmap; if this thread
// wins the race, the object is also counted in the supplied
// task/worker counting structures. Returns whether the mark succeeded.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               size_t* marked_bytes_array,
                                               BitMap* task_card_bm) {
  if (!_nextMarkBitMap->parMark((HeapWord*) obj)) {
    // Some other thread marked the object first; nothing to count.
    return false;
  }
  // We won the marking race: account for the object.
  count_object(obj, hr, marked_bytes_array, task_card_bm);
  return true;
}
// Tries to CAS-mark obj on the next marking bitmap; if this thread
// wins the race, counts word_size heap words starting at obj in the
// counting structures for the given worker id. The size is passed in
// because obj may not be in a consistent state to read it from.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* obj_addr = (HeapWord*) obj;
  if (!_nextMarkBitMap->parMark(obj_addr)) {
    return false;
  }
  count_region(MemRegion(obj_addr, word_size), hr, worker_id);
  return true;
}
// Tries to CAS-mark obj on the next marking bitmap; if this thread
// wins the race, counts the object in the structures for worker_id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               uint worker_id) {
  if (!_nextMarkBitMap->parMark((HeapWord*) obj)) {
    return false;
  }
  // Update the task specific count data for the object.
  count_object(obj, hr, worker_id);
  return true;
}
// As above, but the caller does not know the heap region containing
// the object, so we look it up here before delegating.
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
  HeapRegion* hr = _g1h->heap_region_containing_raw((HeapWord*) obj);
  return par_mark_and_count(obj, hr, worker_id);
}
// Similar to the above, but the object's size in words is already
// known, so we avoid reading it from a possibly inconsistent object.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               uint worker_id) {
  HeapWord* obj_addr = (HeapWord*) obj;
  if (_nextMarkBitMap->parMark(obj_addr)) {
    // We won the marking race: count the object's words.
    count_region(MemRegion(obj_addr, word_size), worker_id);
    return true;
  }
  return false;
}
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
  HeapWord* addr = (HeapWord*)obj;
  // mark() is not a CAS, so there is no race to lose here.
  _nextMarkBitMap->mark(addr);
  // Update the task specific count data for the object.
  count_object(obj, hr, 0 /* worker_id */);
  // Always succeeds (see above).
  return true;
}
// As above, but the caller does not have the heap region containing
// the object, so we look it up first.
inline bool ConcurrentMark::mark_and_count(oop obj) {
  HeapRegion* hr = _g1h->heap_region_containing_raw((HeapWord*) obj);
  return mark_and_count(obj, hr);
}
// Applies cl->do_bit() to each set bit in the intersection of this
// bitmap's covered span and mr. After visiting a marked object's
// header bit, iteration resumes past the object's last word. Returns
// false if the closure aborts the iteration, true otherwise.
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  // Clamp mr to the address range this bitmap covers.
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());
  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);
    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      HeapWord* obj_addr = offsetToHeapWord(start_offset);
      oop obj = (oop) obj_addr;
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      // Skip the object's interior; the next mark can be at the first
      // word past the object (clamped to the end of the range).
      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}
// Iterates over the bitmap's entire covered region.
inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
  return iterate(cl, MemRegion(startWord(), sizeInWords()));
}
inline void CMTask::push(oop obj) { inline void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj; HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
...@@ -84,7 +292,7 @@ inline void CMTask::deal_with_reference(oop obj) { ...@@ -84,7 +292,7 @@ inline void CMTask::deal_with_reference(oop obj) {
HeapWord* objAddr = (HeapWord*) obj; HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
if (_g1h->is_in_g1_reserved(objAddr)) { if (_g1h->is_in_g1_reserved(objAddr)) {
assert(obj != NULL, "null check is implicit"); assert(obj != NULL, "null check is implicit");
if (!_nextMarkBitMap->isMarked(objAddr)) { if (!_nextMarkBitMap->isMarked(objAddr)) {
// Only get the containing region if the object is not marked on the // Only get the containing region if the object is not marked on the
...@@ -98,9 +306,9 @@ inline void CMTask::deal_with_reference(oop obj) { ...@@ -98,9 +306,9 @@ inline void CMTask::deal_with_reference(oop obj) {
} }
// we need to mark it first // we need to mark it first
if (_nextMarkBitMap->parMark(objAddr)) { if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
// No OrderAccess:store_load() is needed. It is implicit in the // No OrderAccess:store_load() is needed. It is implicit in the
// CAS done in parMark(objAddr) above // CAS done in CMBitMap::parMark() call in the routine above.
HeapWord* global_finger = _cm->finger(); HeapWord* global_finger = _cm->finger();
#if _CHECK_BOTH_FINGERS_ #if _CHECK_BOTH_FINGERS_
...@@ -160,25 +368,20 @@ inline void ConcurrentMark::markPrev(oop p) { ...@@ -160,25 +368,20 @@ inline void ConcurrentMark::markPrev(oop p) {
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p); ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
} }
inline void ConcurrentMark::markNext(oop p) { inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity"); uint worker_id, HeapRegion* hr) {
_nextMarkBitMap->mark((HeapWord*) p); assert(obj != NULL, "pre-condition");
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
HeapWord* addr = (HeapWord*) obj; HeapWord* addr = (HeapWord*) obj;
if (hr == NULL) {
// Currently we don't do anything with word_size but we will use it hr = _g1h->heap_region_containing_raw(addr);
// in the very near future in the liveness calculation piggy-backing } else {
// changes. assert(hr->is_in(addr), "pre-condition");
}
#ifdef ASSERT
HeapRegion* hr = _g1h->heap_region_containing(addr);
assert(hr != NULL, "sanity"); assert(hr != NULL, "sanity");
assert(!hr->is_survivor(), "should not allocate survivors during IM"); // Given that we're looking for a region that contains an object
assert(addr < hr->next_top_at_mark_start(), // header it's impossible to get back a HC region.
err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT, assert(!hr->continuesHumongous(), "sanity");
addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
// We cannot assert that word_size == obj->size() given that obj // We cannot assert that word_size == obj->size() given that obj
// might not be in a consistent state (another thread might be in // might not be in a consistent state (another thread might be in
// the process of copying it). So the best thing we can do is to // the process of copying it). So the best thing we can do is to
...@@ -188,10 +391,11 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) { ...@@ -188,10 +391,11 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT, err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
word_size * HeapWordSize, hr->capacity(), word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr))); HR_FORMAT_PARAMS(hr)));
#endif // ASSERT
if (!_nextMarkBitMap->isMarked(addr)) { if (addr < hr->next_top_at_mark_start()) {
_nextMarkBitMap->parMark(addr); if (!_nextMarkBitMap->isMarked(addr)) {
par_mark_and_count(obj, word_size, hr, worker_id);
}
} }
} }
......
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -44,9 +44,7 @@ ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) : ...@@ -44,9 +44,7 @@ ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
_started(false), _started(false),
_in_progress(false), _in_progress(false),
_vtime_accum(0.0), _vtime_accum(0.0),
_vtime_mark_accum(0.0), _vtime_mark_accum(0.0) {
_vtime_count_accum(0.0)
{
create_and_start(); create_and_start();
} }
...@@ -94,9 +92,36 @@ void ConcurrentMarkThread::run() { ...@@ -94,9 +92,36 @@ void ConcurrentMarkThread::run() {
ResourceMark rm; ResourceMark rm;
HandleMark hm; HandleMark hm;
double cycle_start = os::elapsedVTime(); double cycle_start = os::elapsedVTime();
double mark_start_sec = os::elapsedTime();
char verbose_str[128]; char verbose_str[128];
// We have to ensure that we finish scanning the root regions
// before the next GC takes place. To ensure this we have to
// make sure that we do not join the STS until the root regions
// have been scanned. If we did then it's possible that a
// subsequent GC could block us from joining the STS and proceed
// without the root regions have been scanned which would be a
// correctness issue.
double scan_start = os::elapsedTime();
if (!cm()->has_aborted()) {
if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
}
_cm->scanRootRegions();
double scan_end = os::elapsedTime();
if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
scan_end - scan_start);
}
}
double mark_start_sec = os::elapsedTime();
if (PrintGC) { if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->stamp(PrintGCTimeStamps);
...@@ -148,36 +173,12 @@ void ConcurrentMarkThread::run() { ...@@ -148,36 +173,12 @@ void ConcurrentMarkThread::run() {
} }
} while (cm()->restart_for_overflow()); } while (cm()->restart_for_overflow());
double counting_start_time = os::elapsedVTime();
if (!cm()->has_aborted()) {
double count_start_sec = os::elapsedTime();
if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-count-start]");
}
_sts.join();
_cm->calcDesiredRegions();
_sts.leave();
if (!cm()->has_aborted()) {
double count_end_sec = os::elapsedTime();
if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-count-end, %1.7lf]",
count_end_sec - count_start_sec);
}
}
}
double end_time = os::elapsedVTime(); double end_time = os::elapsedVTime();
_vtime_count_accum += (end_time - counting_start_time);
// Update the total virtual time before doing this, since it will try // Update the total virtual time before doing this, since it will try
// to measure it to get the vtime for this marking. We purposely // to measure it to get the vtime for this marking. We purposely
// neglect the presumably-short "completeCleanup" phase here. // neglect the presumably-short "completeCleanup" phase here.
_vtime_accum = (end_time - _vtime_start); _vtime_accum = (end_time - _vtime_start);
if (!cm()->has_aborted()) { if (!cm()->has_aborted()) {
if (g1_policy->adaptive_young_list_length()) { if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime(); double now = os::elapsedTime();
......
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -40,7 +40,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread { ...@@ -40,7 +40,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double _vtime_accum; // Accumulated virtual time. double _vtime_accum; // Accumulated virtual time.
double _vtime_mark_accum; double _vtime_mark_accum;
double _vtime_count_accum;
public: public:
virtual void run(); virtual void run();
...@@ -69,8 +68,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread { ...@@ -69,8 +68,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double vtime_accum(); double vtime_accum();
// Marking virtual time so far // Marking virtual time so far
double vtime_mark_accum(); double vtime_mark_accum();
// Counting virtual time so far.
double vtime_count_accum() { return _vtime_count_accum; }
ConcurrentMark* cm() { return _cm; } ConcurrentMark* cm() { return _cm; }
......
...@@ -174,13 +174,10 @@ public: ...@@ -174,13 +174,10 @@ public:
} }
}; };
YoungList::YoungList(G1CollectedHeap* g1h) YoungList::YoungList(G1CollectedHeap* g1h) :
: _g1h(g1h), _head(NULL), _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
_length(0), _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
_last_sampled_rs_lengths(0), guarantee(check_list_empty(false), "just making sure...");
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
guarantee( check_list_empty(false), "just making sure..." );
} }
void YoungList::push_region(HeapRegion *hr) { void YoungList::push_region(HeapRegion *hr) {
...@@ -1029,6 +1026,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, ...@@ -1029,6 +1026,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
assert(isHumongous(word_size), "attempt_allocation_humongous() " assert(isHumongous(word_size), "attempt_allocation_humongous() "
"should only be called for humongous allocations"); "should only be called for humongous allocations");
// Humongous objects can exhaust the heap quickly, so we should check if we
// need to start a marking cycle at each humongous object allocation. We do
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
collect(GCCause::_g1_humongous_allocation);
}
// We will loop until a) we manage to successfully perform the // We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which // allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll // fails to perform the allocation. b) is the only case when we'll
...@@ -1111,7 +1117,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, ...@@ -1111,7 +1117,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
return _mutator_alloc_region.attempt_allocation_locked(word_size, return _mutator_alloc_region.attempt_allocation_locked(word_size,
false /* bot_updates */); false /* bot_updates */);
} else { } else {
return humongous_obj_allocate(word_size); HeapWord* result = humongous_obj_allocate(word_size);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
g1_policy()->set_initiate_conc_mark_if_possible();
}
return result;
} }
ShouldNotReachHere(); ShouldNotReachHere();
...@@ -1257,7 +1267,18 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, ...@@ -1257,7 +1267,18 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
double start = os::elapsedTime(); double start = os::elapsedTime();
g1_policy()->record_full_collection_start(); g1_policy()->record_full_collection_start();
// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
wait_while_free_regions_coming(); wait_while_free_regions_coming();
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
append_secondary_free_list_if_not_empty_with_lock(); append_secondary_free_list_if_not_empty_with_lock();
gc_prologue(true); gc_prologue(true);
...@@ -1286,7 +1307,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, ...@@ -1286,7 +1307,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
ref_processor_cm()->verify_no_references_recorded(); ref_processor_cm()->verify_no_references_recorded();
// Abandon current iterations of concurrent marking and concurrent // Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress. // refinement, if any are in progress. We have to do this before
// wait_until_scan_finished() below.
concurrent_mark()->abort(); concurrent_mark()->abort();
// Make sure we'll choose a new allocation region afterwards. // Make sure we'll choose a new allocation region afterwards.
...@@ -2295,7 +2317,8 @@ size_t G1CollectedHeap::unsafe_max_alloc() { ...@@ -2295,7 +2317,8 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
return return
((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
cause == GCCause::_g1_humongous_allocation);
} }
#ifndef PRODUCT #ifndef PRODUCT
...@@ -3545,19 +3568,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3545,19 +3568,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
verify_region_sets_optional(); verify_region_sets_optional();
verify_dirty_young_regions(); verify_dirty_young_regions();
{ // This call will decide whether this pause is an initial-mark
// This call will decide whether this pause is an initial-mark // pause. If it is, during_initial_mark_pause() will return true
// pause. If it is, during_initial_mark_pause() will return true // for the duration of this pause.
// for the duration of this pause. g1_policy()->decide_on_conc_mark_initiation();
g1_policy()->decide_on_conc_mark_initiation();
// We do not allow initial-mark to be piggy-backed on a mixed GC. // We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!g1_policy()->during_initial_mark_pause() || assert(!g1_policy()->during_initial_mark_pause() ||
g1_policy()->gcs_are_young(), "sanity"); g1_policy()->gcs_are_young(), "sanity");
// We also do not allow mixed GCs during marking. // We also do not allow mixed GCs during marking.
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity"); assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
// Record whether this pause is an initial mark. When the current
// thread has completed its logging output and it's safe to signal
// the CM thread, the flag's value in the policy has been reset.
bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
// Inner scope for scope based logging, timers, and stats collection
{
char verbose_str[128]; char verbose_str[128];
sprintf(verbose_str, "GC pause "); sprintf(verbose_str, "GC pause ");
if (g1_policy()->gcs_are_young()) { if (g1_policy()->gcs_are_young()) {
...@@ -3613,7 +3642,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3613,7 +3642,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
Universe::verify(/* allow dirty */ false, Universe::verify(/* allow dirty */ false,
/* silent */ false, /* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking); /* option */ VerifyOption_G1UsePrevMarking);
} }
COMPILER2_PRESENT(DerivedPointerTable::clear()); COMPILER2_PRESENT(DerivedPointerTable::clear());
...@@ -3656,6 +3684,18 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3656,6 +3684,18 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->record_collection_pause_start(start_time_sec, g1_policy()->record_collection_pause_start(start_time_sec,
start_used_bytes); start_used_bytes);
double scan_wait_start = os::elapsedTime();
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = _cm->root_regions()->wait_until_scan_finished();
if (waited) {
double scan_wait_end = os::elapsedTime();
double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
}
#if YOUNG_LIST_VERBOSE #if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
_young_list->print(); _young_list->print();
...@@ -3765,16 +3805,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3765,16 +3805,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
} }
if (g1_policy()->during_initial_mark_pause()) { if (g1_policy()->during_initial_mark_pause()) {
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
concurrent_mark()->checkpointRootsInitialPost(); concurrent_mark()->checkpointRootsInitialPost();
set_marking_started(); set_marking_started();
// CAUTION: after the doConcurrentMark() call below, // Note that we don't actually trigger the CM thread at
// the concurrent marking thread(s) could be running // this point. We do that later when we're sure that
// concurrently with us. Make sure that anything after // the current thread has completed its logging output.
// this point does not assume that we are the only GC thread
// running. Note: of course, the actual marking work will
// not start until the safepoint itself is released in
// ConcurrentGCThread::safepoint_desynchronize().
doConcurrentMark();
} }
allocate_dummy_regions(); allocate_dummy_regions();
...@@ -3884,6 +3922,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3884,6 +3922,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
} }
} }
// The closing of the inner scope, immediately above, will complete
// the PrintGC logging output. The record_collection_pause_end() call
// above will complete the logging output of PrintGCDetails.
//
// It is not yet to safe, however, to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
_hrs.verify_optional(); _hrs.verify_optional();
verify_region_sets_optional(); verify_region_sets_optional();
...@@ -3901,6 +3948,21 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { ...@@ -3901,6 +3948,21 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_rem_set()->print_summary_info(); g1_rem_set()->print_summary_info();
} }
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
if (should_start_conc_mark) {
// CAUTION: after the doConcurrentMark() call below,
// the concurrent marking thread(s) could be running
// concurrently with us. Make sure that anything after
// this point does not assume that we are the only GC thread
// running. Note: of course, the actual marking work will
// not start until the safepoint itself is released in
// ConcurrentGCThread::safepoint_desynchronize().
doConcurrentMark();
}
return true; return true;
} }
...@@ -4162,7 +4224,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, ...@@ -4162,7 +4224,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(false) { } ParGCAllocBuffer(gclab_word_size), _retired(false) { }
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
: _g1h(g1h), : _g1h(g1h),
_refs(g1h->task_queue(queue_num)), _refs(g1h->task_queue(queue_num)),
_dcq(&g1h->dirty_card_queue_set()), _dcq(&g1h->dirty_card_queue_set()),
...@@ -4283,6 +4345,7 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, ...@@ -4283,6 +4345,7 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
G1ParScanThreadState* par_scan_state) : G1ParScanThreadState* par_scan_state) :
_g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
_par_scan_state(par_scan_state), _par_scan_state(par_scan_state),
_worker_id(par_scan_state->queue_num()),
_during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()), _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
_mark_in_progress(_g1->mark_in_progress()) { } _mark_in_progress(_g1->mark_in_progress()) { }
...@@ -4294,7 +4357,7 @@ void G1ParCopyHelper::mark_object(oop obj) { ...@@ -4294,7 +4357,7 @@ void G1ParCopyHelper::mark_object(oop obj) {
#endif // ASSERT #endif // ASSERT
// We know that the object is not moving so it's safe to read its size. // We know that the object is not moving so it's safe to read its size.
_cm->grayRoot(obj, (size_t) obj->size()); _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
} }
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
...@@ -4316,7 +4379,7 @@ void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { ...@@ -4316,7 +4379,7 @@ void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
// worker so we cannot trust that its to-space image is // worker so we cannot trust that its to-space image is
// well-formed. So we have to read its size from its from-space // well-formed. So we have to read its size from its from-space
// image which we know should not be changing. // image which we know should not be changing.
_cm->grayRoot(to_obj, (size_t) from_obj->size()); _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
} }
oop G1ParCopyHelper::copy_to_survivor_space(oop old) { oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
...@@ -4406,6 +4469,8 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> ...@@ -4406,6 +4469,8 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
assert(barrier != G1BarrierRS || obj != NULL, assert(barrier != G1BarrierRS || obj != NULL,
"Precondition: G1BarrierRS implies obj is non-NULL"); "Precondition: G1BarrierRS implies obj is non-NULL");
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
// here the null check is implicit in the cset_fast_test() test // here the null check is implicit in the cset_fast_test() test
if (_g1->in_cset_fast_test(obj)) { if (_g1->in_cset_fast_test(obj)) {
oop forwardee; oop forwardee;
...@@ -4424,7 +4489,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> ...@@ -4424,7 +4489,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
// When scanning the RS, we only care about objs in CS. // When scanning the RS, we only care about objs in CS.
if (barrier == G1BarrierRS) { if (barrier == G1BarrierRS) {
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); _par_scan_state->update_rs(_from, p, _worker_id);
} }
} else { } else {
// The object is not in collection set. If we're a root scanning // The object is not in collection set. If we're a root scanning
...@@ -4436,7 +4501,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> ...@@ -4436,7 +4501,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
} }
if (barrier == G1BarrierEvac && obj != NULL) { if (barrier == G1BarrierEvac && obj != NULL) {
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); _par_scan_state->update_rs(_from, p, _worker_id);
} }
if (do_gen_barrier && obj != NULL) { if (do_gen_barrier && obj != NULL) {
...@@ -5666,16 +5731,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { ...@@ -5666,16 +5731,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
// And the region is empty. // And the region is empty.
assert(!used_mr.is_empty(), "Should not have empty regions in a CS."); assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
// If marking is in progress then clear any objects marked in
// the current region. Note mark_in_progress() returns false,
// even during an initial mark pause, until the set_marking_started()
// call which takes place later in the pause.
if (mark_in_progress()) {
assert(!g1_policy()->during_initial_mark_pause(), "sanity");
_cm->nextMarkBitMap()->clearRange(used_mr);
}
free_region(cur, &pre_used, &local_free_list, false /* par */); free_region(cur, &pre_used, &local_free_list, false /* par */);
} else { } else {
cur->uninstall_surv_rate_group(); cur->uninstall_surv_rate_group();
...@@ -5742,8 +5797,9 @@ void G1CollectedHeap::set_free_regions_coming() { ...@@ -5742,8 +5797,9 @@ void G1CollectedHeap::set_free_regions_coming() {
} }
void G1CollectedHeap::reset_free_regions_coming() { void G1CollectedHeap::reset_free_regions_coming() {
assert(free_regions_coming(), "pre-condition");
{ {
assert(free_regions_coming(), "pre-condition");
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_free_regions_coming = false; _free_regions_coming = false;
SecondaryFreeList_lock->notify_all(); SecondaryFreeList_lock->notify_all();
......
...@@ -355,6 +355,7 @@ private: ...@@ -355,6 +355,7 @@ private:
// explicitly started if: // explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent. // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
// (c) cause == _g1_humongous_allocation
bool should_do_concurrent_full_gc(GCCause::Cause cause); bool should_do_concurrent_full_gc(GCCause::Cause cause);
// Keeps track of how many "full collections" (i.e., Full GCs or // Keeps track of how many "full collections" (i.e., Full GCs or
...@@ -1172,6 +1173,10 @@ public: ...@@ -1172,6 +1173,10 @@ public:
_old_set.remove(hr); _old_set.remove(hr);
} }
size_t non_young_capacity_bytes() {
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
}
void set_free_regions_coming(); void set_free_regions_coming();
void reset_free_regions_coming(); void reset_free_regions_coming();
bool free_regions_coming() { return _free_regions_coming; } bool free_regions_coming() { return _free_regions_coming; }
...@@ -1904,7 +1909,7 @@ protected: ...@@ -1904,7 +1909,7 @@ protected:
G1ParScanPartialArrayClosure* _partial_scan_cl; G1ParScanPartialArrayClosure* _partial_scan_cl;
int _hash_seed; int _hash_seed;
int _queue_num; uint _queue_num;
size_t _term_attempts; size_t _term_attempts;
...@@ -1948,7 +1953,7 @@ protected: ...@@ -1948,7 +1953,7 @@ protected:
} }
public: public:
G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num); G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
~G1ParScanThreadState() { ~G1ParScanThreadState() {
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
...@@ -2040,7 +2045,7 @@ public: ...@@ -2040,7 +2045,7 @@ public:
} }
int* hash_seed() { return &_hash_seed; } int* hash_seed() { return &_hash_seed; }
int queue_num() { return _queue_num; } uint queue_num() { return _queue_num; }
size_t term_attempts() const { return _term_attempts; } size_t term_attempts() const { return _term_attempts; }
void note_term_attempt() { _term_attempts++; } void note_term_attempt() { _term_attempts++; }
......
...@@ -141,6 +141,7 @@ G1CollectorPolicy::G1CollectorPolicy() : ...@@ -141,6 +141,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
_cur_clear_ct_time_ms(0.0), _cur_clear_ct_time_ms(0.0),
_mark_closure_time_ms(0.0), _mark_closure_time_ms(0.0),
_root_region_scan_wait_time_ms(0.0),
_cur_ref_proc_time_ms(0.0), _cur_ref_proc_time_ms(0.0),
_cur_ref_enq_time_ms(0.0), _cur_ref_enq_time_ms(0.0),
...@@ -213,8 +214,6 @@ G1CollectorPolicy::G1CollectorPolicy() : ...@@ -213,8 +214,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_survivor_bytes_before_gc(0), _survivor_bytes_before_gc(0),
_capacity_before_gc(0), _capacity_before_gc(0),
_prev_collection_pause_used_at_end_bytes(0),
_eden_cset_region_length(0), _eden_cset_region_length(0),
_survivor_cset_region_length(0), _survivor_cset_region_length(0),
_old_cset_region_length(0), _old_cset_region_length(0),
...@@ -905,19 +904,10 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, ...@@ -905,19 +904,10 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed"); gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
} }
if (!during_initial_mark_pause()) { // We only need to do this here as the policy will only be applied
// We only need to do this here as the policy will only be applied // to the GC we're about to start. so, no point is calculating this
// to the GC we're about to start. so, no point is calculating this // every time we calculate / recalculate the target young length.
// every time we calculate / recalculate the target young length. update_survivors_policy();
update_survivors_policy();
} else {
// The marking phase has a "we only copy implicitly live
// objects during marking" invariant. The easiest way to ensure it
// holds is not to allocate any survivor regions and tenure all
// objects. In the future we might change this and handle survivor
// regions specially during marking.
tenure_all_objects();
}
assert(_g1->used() == _g1->recalculate_used(), assert(_g1->used() == _g1->recalculate_used(),
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT, err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
...@@ -969,6 +959,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, ...@@ -969,6 +959,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
// This is initialized to zero here and is set during // This is initialized to zero here and is set during
// the evacuation pause if marking is in progress. // the evacuation pause if marking is in progress.
_cur_satb_drain_time_ms = 0.0; _cur_satb_drain_time_ms = 0.0;
// This is initialized to zero here and is set during the evacuation
// pause if we actually waited for the root region scanning to finish.
_root_region_scan_wait_time_ms = 0.0;
_last_gc_was_young = false; _last_gc_was_young = false;
...@@ -1140,6 +1133,50 @@ double G1CollectorPolicy::max_sum(double* data1, double* data2) { ...@@ -1140,6 +1133,50 @@ double G1CollectorPolicy::max_sum(double* data1, double* data2) {
return ret; return ret;
} }
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
return false;
}
size_t marking_initiating_used_threshold =
(_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
size_t cur_used_bytes = _g1->non_young_capacity_bytes();
size_t alloc_byte_size = alloc_word_size * HeapWordSize;
if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
if (gcs_are_young()) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
ergo_format_byte("occupancy")
ergo_format_byte("allocation request")
ergo_format_byte_perc("threshold")
ergo_format_str("source"),
cur_used_bytes,
alloc_byte_size,
marking_initiating_used_threshold,
(double) InitiatingHeapOccupancyPercent,
source);
return true;
} else {
ergo_verbose5(ErgoConcCycles,
"do not request concurrent cycle initiation",
ergo_format_reason("still doing mixed collections")
ergo_format_byte("occupancy")
ergo_format_byte("allocation request")
ergo_format_byte_perc("threshold")
ergo_format_str("source"),
cur_used_bytes,
alloc_byte_size,
marking_initiating_used_threshold,
(double) InitiatingHeapOccupancyPercent,
source);
}
}
return false;
}
// Anything below that is considered to be zero // Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001 #define MIN_TIMER_GRANULARITY 0.0000001
...@@ -1166,44 +1203,16 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { ...@@ -1166,44 +1203,16 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
#endif // PRODUCT #endif // PRODUCT
last_pause_included_initial_mark = during_initial_mark_pause(); last_pause_included_initial_mark = during_initial_mark_pause();
if (last_pause_included_initial_mark) if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0); record_concurrent_mark_init_end(0.0);
size_t marking_initiating_used_threshold =
(_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
if (!_g1->mark_in_progress() && !_last_young_gc) {
assert(!last_pause_included_initial_mark, "invariant");
if (cur_used_bytes > marking_initiating_used_threshold) {
if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
assert(!during_initial_mark_pause(), "we should not see this here");
ergo_verbose3(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
ergo_format_byte("occupancy")
ergo_format_byte_perc("threshold"),
cur_used_bytes,
marking_initiating_used_threshold,
(double) InitiatingHeapOccupancyPercent);
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
set_initiate_conc_mark_if_possible();
} else {
ergo_verbose2(ErgoConcCycles,
"do not request concurrent cycle initiation",
ergo_format_reason("occupancy lower than previous occupancy")
ergo_format_byte("occupancy")
ergo_format_byte("previous occupancy"),
cur_used_bytes,
_prev_collection_pause_used_at_end_bytes);
}
}
} }
_prev_collection_pause_used_at_end_bytes = cur_used_bytes; if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
set_initiate_conc_mark_if_possible();
}
_mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0, _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
end_time_sec, false); end_time_sec, false);
...@@ -1257,6 +1266,10 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { ...@@ -1257,6 +1266,10 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
// is in progress. // is in progress.
other_time_ms -= _cur_satb_drain_time_ms; other_time_ms -= _cur_satb_drain_time_ms;
// Subtract the root region scanning wait time. It's initialized to
// zero at the start of the pause.
other_time_ms -= _root_region_scan_wait_time_ms;
if (parallel) { if (parallel) {
other_time_ms -= _cur_collection_par_time_ms; other_time_ms -= _cur_collection_par_time_ms;
} else { } else {
...@@ -1289,6 +1302,8 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { ...@@ -1289,6 +1302,8 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
// each other. Therefore we unconditionally record the SATB drain // each other. Therefore we unconditionally record the SATB drain
// time - even if it's zero. // time - even if it's zero.
body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms); body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
body_summary->record_root_region_scan_wait_time_ms(
_root_region_scan_wait_time_ms);
body_summary->record_ext_root_scan_time_ms(ext_root_scan_time); body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
body_summary->record_satb_filtering_time_ms(satb_filtering_time); body_summary->record_satb_filtering_time_ms(satb_filtering_time);
...@@ -1385,6 +1400,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { ...@@ -1385,6 +1400,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
(last_pause_included_initial_mark) ? " (initial-mark)" : "", (last_pause_included_initial_mark) ? " (initial-mark)" : "",
elapsed_ms / 1000.0); elapsed_ms / 1000.0);
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
if (parallel) { if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms); print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms); print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
...@@ -1988,6 +2006,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const { ...@@ -1988,6 +2006,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
if (summary->get_total_seq()->num() > 0) { if (summary->get_total_seq()->num() > 0) {
print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq()); print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
if (body_summary != NULL) { if (body_summary != NULL) {
print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
if (parallel) { if (parallel) {
print_summary(1, "Parallel Time", body_summary->get_parallel_seq()); print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq()); print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
...@@ -2029,15 +2048,17 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const { ...@@ -2029,15 +2048,17 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
// parallel // parallel
NumberSeq* other_parts[] = { NumberSeq* other_parts[] = {
body_summary->get_satb_drain_seq(), body_summary->get_satb_drain_seq(),
body_summary->get_root_region_scan_wait_seq(),
body_summary->get_parallel_seq(), body_summary->get_parallel_seq(),
body_summary->get_clear_ct_seq() body_summary->get_clear_ct_seq()
}; };
calc_other_times_ms = NumberSeq(summary->get_total_seq(), calc_other_times_ms = NumberSeq(summary->get_total_seq(),
3, other_parts); 4, other_parts);
} else { } else {
// serial // serial
NumberSeq* other_parts[] = { NumberSeq* other_parts[] = {
body_summary->get_satb_drain_seq(), body_summary->get_satb_drain_seq(),
body_summary->get_root_region_scan_wait_seq(),
body_summary->get_update_rs_seq(), body_summary->get_update_rs_seq(),
body_summary->get_ext_root_scan_seq(), body_summary->get_ext_root_scan_seq(),
body_summary->get_satb_filtering_seq(), body_summary->get_satb_filtering_seq(),
...@@ -2045,7 +2066,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const { ...@@ -2045,7 +2066,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
body_summary->get_obj_copy_seq() body_summary->get_obj_copy_seq()
}; };
calc_other_times_ms = NumberSeq(summary->get_total_seq(), calc_other_times_ms = NumberSeq(summary->get_total_seq(),
6, other_parts); 7, other_parts);
} }
check_other_times(1, summary->get_other_seq(), &calc_other_times_ms); check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
} }
......
...@@ -65,6 +65,7 @@ public: ...@@ -65,6 +65,7 @@ public:
class MainBodySummary: public CHeapObj { class MainBodySummary: public CHeapObj {
define_num_seq(satb_drain) // optional define_num_seq(satb_drain) // optional
define_num_seq(root_region_scan_wait)
define_num_seq(parallel) // parallel only define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan) define_num_seq(ext_root_scan)
define_num_seq(satb_filtering) define_num_seq(satb_filtering)
...@@ -177,7 +178,6 @@ private: ...@@ -177,7 +178,6 @@ private:
double _cur_collection_start_sec; double _cur_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes; size_t _cur_collection_pause_used_at_start_bytes;
size_t _cur_collection_pause_used_regions_at_start; size_t _cur_collection_pause_used_regions_at_start;
size_t _prev_collection_pause_used_at_end_bytes;
double _cur_collection_par_time_ms; double _cur_collection_par_time_ms;
double _cur_satb_drain_time_ms; double _cur_satb_drain_time_ms;
double _cur_clear_ct_time_ms; double _cur_clear_ct_time_ms;
...@@ -716,6 +716,7 @@ private: ...@@ -716,6 +716,7 @@ private:
double _mark_remark_start_sec; double _mark_remark_start_sec;
double _mark_cleanup_start_sec; double _mark_cleanup_start_sec;
double _mark_closure_time_ms; double _mark_closure_time_ms;
double _root_region_scan_wait_time_ms;
// Update the young list target length either by setting it to the // Update the young list target length either by setting it to the
// desired fixed value or by calculating it using G1's pause // desired fixed value or by calculating it using G1's pause
...@@ -800,6 +801,8 @@ public: ...@@ -800,6 +801,8 @@ public:
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
// Update the heuristic info to record a collection pause of the given // Update the heuristic info to record a collection pause of the given
// start time, where the given number of bytes were used at the start. // start time, where the given number of bytes were used at the start.
// This may involve changing the desired size of a collection set. // This may involve changing the desired size of a collection set.
...@@ -816,6 +819,10 @@ public: ...@@ -816,6 +819,10 @@ public:
_mark_closure_time_ms = mark_closure_time_ms; _mark_closure_time_ms = mark_closure_time_ms;
} }
void record_root_region_scan_wait_time(double time_ms) {
_root_region_scan_wait_time_ms = time_ms;
}
void record_concurrent_mark_remark_start(); void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end(); void record_concurrent_mark_remark_end();
...@@ -1146,11 +1153,6 @@ public: ...@@ -1146,11 +1153,6 @@ public:
_survivor_surv_rate_group->stop_adding_regions(); _survivor_surv_rate_group->stop_adding_regions();
} }
void tenure_all_objects() {
_max_survivor_regions = 0;
_tenuring_threshold = 0;
}
void record_survivor_regions(size_t regions, void record_survivor_regions(size_t regions,
HeapRegion* head, HeapRegion* head,
HeapRegion* tail) { HeapRegion* tail) {
......
...@@ -70,16 +70,20 @@ private: ...@@ -70,16 +70,20 @@ private:
OopsInHeapRegionClosure *_update_rset_cl; OopsInHeapRegionClosure *_update_rset_cl;
bool _during_initial_mark; bool _during_initial_mark;
bool _during_conc_mark; bool _during_conc_mark;
uint _worker_id;
public: public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm, RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
HeapRegion* hr, HeapRegion* hr,
OopsInHeapRegionClosure* update_rset_cl, OopsInHeapRegionClosure* update_rset_cl,
bool during_initial_mark, bool during_initial_mark,
bool during_conc_mark) : bool during_conc_mark,
uint worker_id) :
_g1(g1), _cm(cm), _hr(hr), _marked_bytes(0), _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
_update_rset_cl(update_rset_cl), _update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark), _during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark) { } _during_conc_mark(during_conc_mark),
_worker_id(worker_id) { }
size_t marked_bytes() { return _marked_bytes; } size_t marked_bytes() { return _marked_bytes; }
...@@ -123,7 +127,7 @@ public: ...@@ -123,7 +127,7 @@ public:
// explicitly and all objects in the CSet are considered // explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and // (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS. // we'll leave them over NTAMS.
_cm->markNext(obj); _cm->grayRoot(obj, obj_size, _worker_id, _hr);
} }
_marked_bytes += (obj_size * HeapWordSize); _marked_bytes += (obj_size * HeapWordSize);
obj->set_mark(markOopDesc::prototype()); obj->set_mark(markOopDesc::prototype());
...@@ -155,12 +159,14 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure { ...@@ -155,12 +159,14 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
ConcurrentMark* _cm; ConcurrentMark* _cm;
OopsInHeapRegionClosure *_update_rset_cl; OopsInHeapRegionClosure *_update_rset_cl;
uint _worker_id;
public: public:
RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h, RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
OopsInHeapRegionClosure* update_rset_cl) : OopsInHeapRegionClosure* update_rset_cl,
uint worker_id) :
_g1h(g1h), _update_rset_cl(update_rset_cl), _g1h(g1h), _update_rset_cl(update_rset_cl),
_cm(_g1h->concurrent_mark()) { } _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
bool doHeapRegion(HeapRegion *hr) { bool doHeapRegion(HeapRegion *hr) {
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause(); bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
...@@ -173,7 +179,8 @@ public: ...@@ -173,7 +179,8 @@ public:
if (hr->evacuation_failed()) { if (hr->evacuation_failed()) {
RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl, RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
during_initial_mark, during_initial_mark,
during_conc_mark); during_conc_mark,
_worker_id);
MemRegion mr(hr->bottom(), hr->end()); MemRegion mr(hr->bottom(), hr->end());
// We'll recreate the prev marking info so we'll first clear // We'll recreate the prev marking info so we'll first clear
...@@ -226,7 +233,7 @@ public: ...@@ -226,7 +233,7 @@ public:
update_rset_cl = &immediate_update; update_rset_cl = &immediate_update;
} }
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl); RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id); HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &rsfp_cl); _g1h->collection_set_iterate_from(hr, &rsfp_cl);
......
...@@ -89,16 +89,15 @@ class G1CollectedHeap; ...@@ -89,16 +89,15 @@ class G1CollectedHeap;
// //
// * Min Capacity // * Min Capacity
// //
// We set this to 0 for all spaces. We could consider setting the old // We set this to 0 for all spaces.
// min capacity to the min capacity of the heap (see 7078465).
// //
// * Max Capacity // * Max Capacity
// //
// For jstat, we set the max capacity of all spaces to heap_capacity, // For jstat, we set the max capacity of all spaces to heap_capacity,
// given that we don't always have a reasonably upper bound on how big // given that we don't always have a reasonable upper bound on how big
// each space can grow. For the memory pools, we actually make the max // each space can grow. For the memory pools, we make the max
// capacity undefined. We could consider setting the old max capacity // capacity undefined with the exception of the old memory pool for
// to the max capacity of the heap (see 7078465). // which we make the max capacity same as the max heap capacity.
// //
// If we had more accurate occupancy / capacity information per // If we had more accurate occupancy / capacity information per
// region set the above calculations would be greatly simplified and // region set the above calculations would be greatly simplified and
......
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -191,44 +191,5 @@ public: ...@@ -191,44 +191,5 @@ public:
virtual void do_oop( oop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); }
}; };
class UpdateRSOrPushRefOopClosure: public OopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
OopsInHeapRegionClosure* _push_ref_cl;
bool _record_refs_into_cset;
int _worker_i;
template <class T> void do_oop_work(T* p);
public:
UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
int worker_i = 0) :
_g1(g1h),
_g1_rem_set(rs),
_from(NULL),
_record_refs_into_cset(record_refs_into_cset),
_push_ref_cl(push_ref_cl),
_worker_i(worker_i) { }
void set_from(HeapRegion* from) {
assert(from != NULL, "from region must be non-NULL");
_from = from;
}
bool self_forwarded(oop obj) {
bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
return result;
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
...@@ -374,7 +374,9 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -374,7 +374,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
ParVerifyClaimValue = 4, ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5, RebuildRSClaimValue = 5,
CompleteMarkCSetClaimValue = 6, CompleteMarkCSetClaimValue = 6,
ParEvacFailureClaimValue = 7 ParEvacFailureClaimValue = 7,
AggregateCountClaimValue = 8,
VerifyCountClaimValue = 9
}; };
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) { inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
......
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -59,6 +59,7 @@ class HRSPhaseSetter; ...@@ -59,6 +59,7 @@ class HRSPhaseSetter;
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class hrs_ext_msg; friend class hrs_ext_msg;
friend class HRSPhaseSetter; friend class HRSPhaseSetter;
friend class VMStructs;
protected: protected:
static size_t calculate_region_num(HeapRegion* hr); static size_t calculate_region_num(HeapRegion* hr);
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册