Commit e5446645 authored by T trims

Merge

Parents: 15e3c3f0 4de99979
Branch: master
......@@ -118,9 +118,9 @@ public interface Debugger extends SymbolLookup, ThreadAccess {
public long getJIntSize();
public long getJLongSize();
public long getJShortSize();
public long getHeapBase();
public long getHeapOopSize();
public long getLogMinObjAlignmentInBytes();
public long getNarrowOopBase();
public int getNarrowOopShift();
public ReadResult readBytesFromProcess(long address, long numBytes)
throws DebuggerException;
......
......@@ -56,8 +56,8 @@ public abstract class DebuggerBase implements Debugger {
// heap data.
protected long oopSize;
protected long heapOopSize;
protected long heapBase; // heap base for compressed oops.
protected long logMinObjAlignmentInBytes; // Used to decode compressed oops.
protected long narrowOopBase; // heap base for compressed oops.
protected int narrowOopShift; // shift to decode compressed oops.
// Should be initialized if desired by calling initCache()
private PageCache cache;
......@@ -159,10 +159,10 @@ public abstract class DebuggerBase implements Debugger {
javaPrimitiveTypesConfigured = true;
}
public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignmentInBytes) {
this.heapBase = heapBase;
public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift) {
this.heapOopSize = heapOopSize;
this.logMinObjAlignmentInBytes = logMinObjAlignmentInBytes;
this.narrowOopBase = narrowOopBase;
this.narrowOopShift = narrowOopShift;
}
/** May be called by subclasses if desired to initialize the page
......@@ -459,7 +459,7 @@ public abstract class DebuggerBase implements Debugger {
long value = readCInteger(address, getHeapOopSize(), true);
if (value != 0) {
// See oop.inline.hpp decode_heap_oop
value = (long)(heapBase + (long)(value << logMinObjAlignmentInBytes));
value = (long)(narrowOopBase + (long)(value << narrowOopShift));
}
return value;
}
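The arithmetic above mirrors decode_heap_oop in oop.inline.hpp: a 32-bit narrow oop widens to a full address by shifting left by narrowOopShift and adding narrowOopBase, with 0 staying null. A minimal standalone Java sketch of that decode (the class and field names are illustrative, not part of the SA API):

// Sketch of compressed-oop decoding as performed in readCompOopValue above.
class CompressedOopDecoder {
    private final long narrowOopBase;   // heap base for compressed oops
    private final int  narrowOopShift;  // number of alignment bits to restore

    CompressedOopDecoder(long narrowOopBase, int narrowOopShift) {
        this.narrowOopBase = narrowOopBase;
        this.narrowOopShift = narrowOopShift;
    }

    long decode(long narrowOop) {
        if (narrowOop == 0) return 0;   // a narrow oop of 0 is still null
        return narrowOopBase + (narrowOop << narrowOopShift);
    }

    public static void main(String[] args) {
        // Zero-based mode (base == 0): decoding is just the shift.
        System.out.println(Long.toHexString(new CompressedOopDecoder(0L, 3).decode(0x10)));           // "80"
        // Heap-based mode: shift, then add the base.
        System.out.println(Long.toHexString(new CompressedOopDecoder(0x800000000L, 3).decode(0x10))); // "800000080"
    }
}

The zero-base case is why the MacroAssembler changes later in this commit can drop the add entirely when Universe::narrow_oop_base() == NULL.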
......@@ -545,10 +545,10 @@ public abstract class DebuggerBase implements Debugger {
return heapOopSize;
}
public long getHeapBase() {
return heapBase;
public long getNarrowOopBase() {
return narrowOopBase;
}
public long getLogMinObjAlignmentInBytes() {
return logMinObjAlignmentInBytes;
public int getNarrowOopShift() {
return narrowOopShift;
}
}
......@@ -42,5 +42,5 @@ public interface JVMDebugger extends Debugger {
long jintSize,
long jlongSize,
long jshortSize);
public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignment);
public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift);
}
......@@ -65,9 +65,10 @@ public interface RemoteDebugger extends Remote {
public long getJIntSize() throws RemoteException;
public long getJLongSize() throws RemoteException;
public long getJShortSize() throws RemoteException;
public long getHeapBase() throws RemoteException;
public long getHeapOopSize() throws RemoteException;
public long getLogMinObjAlignmentInBytes() throws RemoteException;
public long getNarrowOopBase() throws RemoteException;
public int getNarrowOopShift() throws RemoteException;
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException;
public int getThreadHashCode(long addrOrId, boolean isAddress) throws RemoteException;
......
......@@ -85,9 +85,9 @@ public class RemoteDebuggerClient extends DebuggerBase implements JVMDebugger {
jlongSize = remoteDebugger.getJLongSize();
jshortSize = remoteDebugger.getJShortSize();
javaPrimitiveTypesConfigured = true;
heapBase = remoteDebugger.getHeapBase();
narrowOopBase = remoteDebugger.getNarrowOopBase();
narrowOopShift = remoteDebugger.getNarrowOopShift();
heapOopSize = remoteDebugger.getHeapOopSize();
logMinObjAlignmentInBytes = remoteDebugger.getLogMinObjAlignmentInBytes();
}
catch (RemoteException e) {
throw new DebuggerException(e);
......
......@@ -114,17 +114,18 @@ public class RemoteDebuggerServer extends UnicastRemoteObject
return debugger.getJShortSize();
}
public long getHeapBase() throws RemoteException {
return debugger.getHeapBase();
}
public long getHeapOopSize() throws RemoteException {
return debugger.getHeapOopSize();
}
public long getLogMinObjAlignmentInBytes() throws RemoteException {
return debugger.getLogMinObjAlignmentInBytes();
public long getNarrowOopBase() throws RemoteException {
return debugger.getNarrowOopBase();
}
public int getNarrowOopShift() throws RemoteException {
return debugger.getNarrowOopShift();
}
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException {
ThreadProxy t1 = getThreadProxy(addrOrId1, isAddress1);
......
......@@ -53,7 +53,8 @@ public class Universe {
// system obj array klass object
private static sun.jvm.hotspot.types.OopField systemObjArrayKlassObjField;
private static AddressField heapBaseField;
private static AddressField narrowOopBaseField;
private static CIntegerField narrowOopShiftField;
static {
VM.registerVMInitializedObserver(new Observer() {
......@@ -86,7 +87,8 @@ public class Universe {
systemObjArrayKlassObjField = type.getOopField("_systemObjArrayKlassObj");
heapBaseField = type.getAddressField("_heap_base");
narrowOopBaseField = type.getAddressField("_narrow_oop._base");
narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
}
public Universe() {
......@@ -100,14 +102,18 @@ public class Universe {
}
}
public static long getHeapBase() {
if (heapBaseField.getValue() == null) {
public static long getNarrowOopBase() {
if (narrowOopBaseField.getValue() == null) {
return 0;
} else {
return heapBaseField.getValue().minus(null);
return narrowOopBaseField.getValue().minus(null);
}
}
public static int getNarrowOopShift() {
return (int)narrowOopShiftField.getValue();
}
/** Returns "TRUE" iff "p" points into the allocated area of the heap. */
public boolean isIn(Address p) {
return heap().isIn(p);
......
......@@ -342,11 +342,12 @@ public class VM {
throw new RuntimeException("Attempt to initialize VM twice");
}
soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
debugger.putHeapConst(Universe.getHeapBase(), soleInstance.getHeapOopSize(),
soleInstance.logMinObjAlignmentInBytes);
debugger.putHeapConst(soleInstance.getHeapOopSize(), Universe.getNarrowOopBase(),
Universe.getNarrowOopShift());
for (Iterator iter = vmInitializedObservers.iterator(); iter.hasNext(); ) {
((Observer) iter.next()).update(null, null);
}
}
/** This is used by the debugging system */
......
......@@ -19,12 +19,12 @@
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
#
# Properties for jprt
# All build result bundles are full jdks, so the 64bit testing does not
# All build result bundles are full jdks, so the 64bit testing does not
# need the 32bit sibling bundle installed.
# Note: If the hotspot/make/Makefile changed to only bundle the 64bit files
# when bundling 64bit, and stripped out the 64bit files from any 32bit
......@@ -89,60 +89,52 @@ jprt.my.solaris.sparc.test.targets= \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese_Xcomp, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese_Xcomp_2, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese_Xcomp_3, \
${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_default, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_default_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_default, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark_2, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark_3
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_G1, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParOldGC
jprt.my.solaris.sparcv9.test.targets= \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
${jprt.my.solaris.sparcv9}-product-c2-runThese, \
${jprt.my.solaris.sparcv9}-product-c2-runThese_Xcomp, \
${jprt.my.solaris.sparcv9}-product-c2-runThese_Xcomp_2, \
${jprt.my.solaris.sparcv9}-product-c2-runThese_Xcomp_3, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_default, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark_2, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark_3
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
......@@ -154,73 +146,80 @@ jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_default_2, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC_2, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC_2, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC_2, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS_2, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_default, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_CMS
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
jprt.my.solaris.i586.test.targets= \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
${jprt.my.solaris.i586}-product-c2-runThese_Xcomp_2, \
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp_2, \
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \
${jprt.my.solaris.i586}-product-c1-GCBasher_default, \
${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \
${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \
${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \
${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \
${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \
${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_default, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \
${jprt.my.solaris.i586}-product-c1-GCOld_default, \
${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \
${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \
${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \
${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \
${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark_2, \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark_3
${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParOldGC
jprt.my.linux.i586.test.targets = \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
${jprt.my.linux.i586}-product-c1-runThese_Xcomp_2, \
${jprt.my.linux.i586}-product-c1-runThese_Xcomp_3, \
${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \
${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp_2, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_default, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-scimark_2, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-scimark_3
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC
jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
......@@ -230,15 +229,19 @@ jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark_2, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark_3
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
......@@ -251,16 +254,20 @@ jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_default, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
${jprt.my.windows.i586}-product-{c1|c2}-scimark_2, \
${jprt.my.windows.i586}-product-{c1|c2}-scimark_3
${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC
jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
......@@ -272,16 +279,20 @@ jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_default, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.windows.x64}-product-c2-jbb_CMS, \
${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark_2, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark_3
${jprt.my.windows.x64}-product-c2-jbb_G1, \
${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
# The complete list of test targets for jprt
......
......@@ -52,6 +52,19 @@ CAT="$MKS_HOME/cat.exe"
RM="$MKS_HOME/rm.exe"
DUMPBIN="link.exe /dump"
# When called from the IDE, the first param should contain the link version; otherwise it may be nil
if [ "x$1" != "x" ]; then
LINK_VER="$1"
fi
if [ "x$LINK_VER" != "x800" -a "x$LINK_VER" != "x900" ]; then
$DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def
else
# Can't use pipes when calling cl.exe or link.exe from the IDE. Using the intermediate file vm3.def
$DUMPBIN /OUT:vm3.def /symbols *.obj
"$CAT" vm3.def | "$GREP" "??_7.*@@6B@" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def
"$RM" -f vm3.def
fi
"$CAT" vm1.def vm2.def > vm.def
"$RM" -f vm1.def vm2.def
......@@ -72,12 +72,20 @@ REM figure out MSC version
for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i
echo **************************************************************
set ProjectFile=vm.vcproj
if "%MSC_VER%" == "1200" (
set ProjectFile=vm.dsp
echo Will generate VC6 project {unsupported}
) else (
set ProjectFile=vm.vcproj
echo Will generate VC7 project
if "%MSC_VER%" == "1400" (
echo Will generate VC8 {Visual Studio 2005}
) else (
if "%MSC_VER%" == "1500" (
echo Will generate VC9 {Visual Studio 2008}
) else (
echo Will generate VC7 project {Visual Studio 2003 .NET}
)
)
)
echo %ProjectFile%
echo **************************************************************
......
......@@ -29,6 +29,7 @@
# cl version 13.10.3077 returns "MSC_VER=1310"
# cl version 14.00.30701 returns "MSC_VER=1399" (OLD_MSSDK version)
# cl version 14.00.40310.41 returns "MSC_VER=1400"
# cl version 15.00.21022.8 returns "MSC_VER=1500"
# Note that we currently do not have a way to set HotSpotMksHome in
# the batch build, but so far this has not seemed to be a problem. The
......
......@@ -46,6 +46,7 @@ ADLCFLAGS=-q -T -D_LP64
ADLCFLAGS=-q -T -U_LP64
!endif
CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
CPP_INCLUDE_DIRS=\
/I "..\generated" \
......
......@@ -170,10 +170,6 @@ LINK_FLAGS = /manifest $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
!if "$(BUILDARCH)" == "i486"
# VS2005 on x86 restricts the use of certain libc functions without this
CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE
!endif
!endif
!if "$(COMPILER_NAME)" == "VS2008"
......@@ -185,10 +181,6 @@ LINK_FLAGS = /manifest $(LINK_FLAGS)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
!if "$(BUILDARCH)" == "i486"
# VS2005 on x86 restricts the use of certain libc functions without this
CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE
!endif
!endif
# Compile for space above time.
......
......@@ -48,6 +48,8 @@ MakeDepsSources=\
$(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatform.java \
$(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC6.java \
$(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC7.java \
$(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC8.java \
$(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC9.java \
$(WorkSpace)\src\share\tools\MakeDeps\Util.java \
$(WorkSpace)\src\share\tools\MakeDeps\BuildConfig.java \
$(WorkSpace)\src\share\tools\MakeDeps\ArgsParser.java
......@@ -121,7 +123,7 @@ MakeDepsIDEOptions=\
-additionalFile includeDB_gc_shared \
-additionalFile includeDB_gc_serial \
-additionalGeneratedFile $(HOTSPOTBUILDSPACE)\%f\%b vm.def \
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh" \
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
$(MakeDepsIncludesPRIVATE)
# Add in build-specific options
......
......@@ -42,10 +42,23 @@ COMPILE_RMIC=rmic
BOOT_JAVA_HOME=
!endif
ProjectFile=vm.vcproj
!if "$(MSC_VER)" == "1200"
VcVersion=VC6
ProjectFile=vm.dsp
!elseif "$(MSC_VER)" == "1400"
VcVersion=VC8
!elseif "$(MSC_VER)" == "1500"
VcVersion=VC9
!else
VcVersion=VC7
ProjectFile=vm.vcproj
!endif
......@@ -89,9 +89,11 @@ checkAndBuildSA:: $(SAWINDBG)
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
!elseif "$(BUILDARCH)" == "amd64"
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
!if "$(COMPILER_NAME)" == "VS2005"
# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line,
# otherwise we get missing __security_check_cookie externals at link time.
SA_LINK_FLAGS = bufferoverflowU.lib
!endif
!else
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
!endif
......
......@@ -27,9 +27,9 @@
all: checkCL checkLink
checkCL:
@ if "$(MSC_VER)" NEQ "1310" if "$(MSC_VER)" NEQ "1399" if "$(MSC_VER)" NEQ "1400" \
@ if "$(MSC_VER)" NEQ "1310" if "$(MSC_VER)" NEQ "1399" if "$(MSC_VER)" NEQ "1400" if "$(MSC_VER)" NEQ "1500" \
echo *** WARNING *** unrecognized cl.exe version $(MSC_VER) ($(RAW_MSC_VER)). Use FORCE_MSC_VER to override automatic detection.
checkLink:
@ if "$(LINK_VER)" NEQ "710" if "$(LINK_VER)" NEQ "800" \
@ if "$(LINK_VER)" NEQ "710" if "$(LINK_VER)" NEQ "800" if "$(LINK_VER)" NEQ "900" \
echo *** WARNING *** unrecognized link.exe version $(LINK_VER) ($(RAW_LINK_VER)). Use FORCE_LINK_VER to override automatic detection.
......@@ -2767,6 +2767,268 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
void MacroAssembler::check_klass_subtype(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Label& L_success) {
Label L_failure, L_pop_to_failure;
check_klass_subtype_fast_path(sub_klass, super_klass,
temp_reg, temp2_reg,
&L_success, &L_failure, NULL);
Register sub_2 = sub_klass;
Register sup_2 = super_klass;
if (!sub_2->is_global()) sub_2 = L0;
if (!sup_2->is_global()) sup_2 = L1;
save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
check_klass_subtype_slow_path(sub_2, sup_2,
L2, L3, L4, L5,
NULL, &L_pop_to_failure);
// on success:
restore();
ba(false, L_success);
delayed()->nop();
// on failure:
bind(L_pop_to_failure);
restore();
bind(L_failure);
}
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Label* L_success,
Label* L_failure,
Label* L_slow_path,
RegisterConstant super_check_offset,
Register instanceof_hack) {
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
bool need_slow_path = (must_load_sco ||
super_check_offset.constant_or_zero() == sco_offset);
assert_different_registers(sub_klass, super_klass, temp_reg);
if (super_check_offset.is_register()) {
assert_different_registers(sub_klass, super_klass,
super_check_offset.as_register());
} else if (must_load_sco) {
assert(temp2_reg != noreg, "supply either a temp or a register offset");
}
Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1 || instanceof_hack != noreg ||
(L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
"at most one NULL in the batch, usually");
// Support for the instanceof hack, which uses delay slots to
// set a destination register to zero or one.
bool do_bool_sets = (instanceof_hack != noreg);
#define BOOL_SET(bool_value) \
if (do_bool_sets && bool_value >= 0) \
set(bool_value, instanceof_hack)
#define DELAYED_BOOL_SET(bool_value) \
if (do_bool_sets && bool_value >= 0) \
delayed()->set(bool_value, instanceof_hack); \
else delayed()->nop()
// Hacked ba(), which may only be used just before L_fallthrough.
#define FINAL_JUMP(label, bool_value) \
if (&(label) == &L_fallthrough) { \
BOOL_SET(bool_value); \
} else { \
ba((do_bool_sets && bool_value >= 0), label); \
DELAYED_BOOL_SET(bool_value); \
}
// If the pointers are equal, we are done (e.g., String[] elements).
// This self-check enables sharing of secondary supertype arrays among
// non-primary types such as array-of-interface. Otherwise, each such
// type would need its own customized SSA.
// We move this check to the front of the fast path because many
// type checks are in fact trivially successful in this manner,
// so we get a nicely predicted branch right at the start of the check.
cmp(super_klass, sub_klass);
brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
DELAYED_BOOL_SET(1);
// Check the supertype display:
if (must_load_sco) {
// The super check offset is always positive...
lduw(super_klass, sco_offset, temp2_reg);
super_check_offset = RegisterConstant(temp2_reg);
}
ld_ptr(sub_klass, super_check_offset, temp_reg);
cmp(super_klass, temp_reg);
// This check has worked decisively for primary supers.
// Secondary supers are sought in the super_cache ('super_cache_addr').
// (Secondary supers are interfaces and very deeply nested subtypes.)
// This works in the same check above because of a tricky aliasing
// between the super_cache and the primary super display elements.
// (The 'super_check_addr' can address either, as the case requires.)
// Note that the cache is updated below if it does not help us find
// what we need immediately.
// So if it was a primary super, we can just fail immediately.
// Otherwise, it's the slow path for us (no success at this point).
if (super_check_offset.is_register()) {
brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
delayed(); if (do_bool_sets) BOOL_SET(1);
// if !do_bool_sets, sneak the next cmp into the delay slot:
cmp(super_check_offset.as_register(), sc_offset);
if (L_failure == &L_fallthrough) {
brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_slow_path);
delayed()->nop();
BOOL_SET(0); // fallthrough on failure
} else {
brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
DELAYED_BOOL_SET(0);
FINAL_JUMP(*L_slow_path, -1); // -1 => vanilla delay slot
}
} else if (super_check_offset.as_constant() == sc_offset) {
// Need a slow path; fast failure is impossible.
if (L_slow_path == &L_fallthrough) {
brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
DELAYED_BOOL_SET(1);
} else {
brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
delayed()->nop();
FINAL_JUMP(*L_success, 1);
}
} else {
// No slow path; it's a fast decision.
if (L_failure == &L_fallthrough) {
brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
DELAYED_BOOL_SET(1);
BOOL_SET(0);
} else {
brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
DELAYED_BOOL_SET(0);
FINAL_JUMP(*L_success, 1);
}
}
bind(L_fallthrough);
#undef BOOL_SET
#undef DELAYED_BOOL_SET
#undef FINAL_JUMP
}
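The fast path folds three outcomes into its branch structure: definite success, definite failure, and "consult the slow path". A hedged Java rendering of that decision logic, using a hypothetical KlassModel in place of the real Klass layout (superCheckOffset plays the role of Klass::super_check_offset, and the CACHE sentinel stands in for the secondary_super_cache offset):

// Illustrative tri-state fast path; not the HotSpot API.
enum FastPathResult { SUCCESS, FAILURE, SLOW_PATH }

class KlassModel {
    static final int CACHE = -1;        // sentinel: probe the 1-element cache
    KlassModel[] primarySupers;         // the supertype "display"
    KlassModel   secondarySuperCache;   // aliases with the display probe below
    int          superCheckOffset;      // where a subtype of this klass must match

    static FastPathResult fastPath(KlassModel sub, KlassModel sup) {
        if (sub == sup) return FastPathResult.SUCCESS;    // trivial self check first
        int off = sup.superCheckOffset;
        KlassModel probe = (off == CACHE) ? sub.secondarySuperCache
                                          : sub.primarySupers[off];
        if (probe == sup) return FastPathResult.SUCCESS;  // immediate positive hit
        if (off != CACHE) return FastPathResult.FAILURE;  // primary display is decisive
        return FastPathResult.SLOW_PATH;                  // cache miss: scan secondaries
    }
}

This is the "tricky aliasing" the comments describe: a single load-and-compare serves both the primary display and the secondary-super cache, because the super check offset can address either.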
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
Register super_klass,
Register count_temp,
Register scan_temp,
Register scratch_reg,
Register coop_reg,
Label* L_success,
Label* L_failure) {
assert_different_registers(sub_klass, super_klass,
count_temp, scan_temp, scratch_reg, coop_reg);
Label L_fallthrough, L_loop;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
// a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_supers_offset_in_bytes());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
// Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
#ifndef PRODUCT
int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
inc_counter((address) pst_counter, count_temp, scan_temp);
#endif
// We will consult the secondary-super array.
ld_ptr(sub_klass, ss_offset, scan_temp);
// Compress superclass if necessary.
Register search_key = super_klass;
bool decode_super_klass = false;
if (UseCompressedOops) {
if (coop_reg != noreg) {
encode_heap_oop_not_null(super_klass, coop_reg);
search_key = coop_reg;
} else {
encode_heap_oop_not_null(super_klass);
decode_super_klass = true; // scarce temps!
}
// The superclass is never null; it would be a basic system error if a null
// pointer were to sneak in here. Note that we have already loaded the
// Klass::super_check_offset from the super_klass in the fast path,
// so if there is a null in that register, we are already in the afterlife.
}
// Load the array length. (lduw zero-extends, so a positive length does the right thing on LP64.)
lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
// Check for empty secondary super list
tst(count_temp);
// Top of search loop
bind(L_loop);
br(Assembler::equal, false, Assembler::pn, *L_failure);
delayed()->add(scan_temp, heapOopSize, scan_temp);
assert(heapOopSize != 0, "heapOopSize should be initialized");
// Skip the array header in all array accesses.
int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
elem_offset -= heapOopSize; // the scan pointer was pre-incremented also
// Load next super to check
if (UseCompressedOops) {
// Don't use load_heap_oop; we don't want to decode the element.
lduw( scan_temp, elem_offset, scratch_reg );
} else {
ld_ptr( scan_temp, elem_offset, scratch_reg );
}
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
cmp(scratch_reg, search_key);
// A miss means we are NOT a subtype and need to keep looping
brx(Assembler::notEqual, false, Assembler::pn, L_loop);
delayed()->deccc(count_temp); // decrement trip counter in delay slot
// Falling out the bottom means we found a hit; we ARE a subtype
if (decode_super_klass) decode_heap_oop(super_klass);
// Success. Cache the super we found and proceed in triumph.
st_ptr(super_klass, sub_klass, sc_offset);
if (L_success != &L_fallthrough) {
ba(false, *L_success);
delayed()->nop();
}
bind(L_fallthrough);
}
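The slow path is deliberately simple, since it runs rarely. Continuing the hypothetical KlassModel sketch from above, the equivalent Java logic is a linear scan that updates the one-element cache on a hit so the fast path wins next time:

// Illustrative slow path; mirrors the scan loop in check_klass_subtype_slow_path.
// secondarySupers stands in for sub's Klass::secondary_supers array.
class KlassModelSlowPath {
    static boolean slowPath(KlassModel sub, KlassModel sup, KlassModel[] secondarySupers) {
        for (KlassModel s : secondarySupers) {   // linear scan: rarely used, simplicity wins
            if (s == sup) {
                sub.secondarySuperCache = sup;   // cache the hit for the fast path
                return true;
            }
        }
        return false;                            // fell off the end: not a subtype
    }
}

The compressed-oops handling in the real code (encoding super_klass once before the scan so each comparison is a 32-bit compare) is an optimization this sketch omits.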
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
Register temp_reg,
Label& done, Label* slow_case,
......@@ -4316,7 +4578,13 @@ void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
void MacroAssembler::encode_heap_oop(Register src, Register dst) {
assert (UseCompressedOops, "must be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
verify_oop(src);
if (Universe::narrow_oop_base() == NULL) {
srlx(src, LogMinObjAlignmentInBytes, dst);
return;
}
Label done;
if (src == dst) {
// optimize for frequent case src == dst
......@@ -4338,26 +4606,39 @@ void MacroAssembler::encode_heap_oop(Register src, Register dst) {
void MacroAssembler::encode_heap_oop_not_null(Register r) {
assert (UseCompressedOops, "must be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
verify_oop(r);
sub(r, G6_heapbase, r);
if (Universe::narrow_oop_base() != NULL)
sub(r, G6_heapbase, r);
srlx(r, LogMinObjAlignmentInBytes, r);
}
void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
assert (UseCompressedOops, "must be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
verify_oop(src);
sub(src, G6_heapbase, dst);
srlx(dst, LogMinObjAlignmentInBytes, dst);
if (Universe::narrow_oop_base() == NULL) {
srlx(src, LogMinObjAlignmentInBytes, dst);
} else {
sub(src, G6_heapbase, dst);
srlx(dst, LogMinObjAlignmentInBytes, dst);
}
}
// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
assert (UseCompressedOops, "must be compressed");
Label done;
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sllx(src, LogMinObjAlignmentInBytes, dst);
bpr(rc_nz, true, Assembler::pt, dst, done);
delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
bind(done);
if (Universe::narrow_oop_base() != NULL) {
Label done;
bpr(rc_nz, true, Assembler::pt, dst, done);
delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
bind(done);
}
verify_oop(dst);
}
......@@ -4366,8 +4647,11 @@ void MacroAssembler::decode_heap_oop_not_null(Register r) {
// pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
assert (UseCompressedOops, "must be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sllx(r, LogMinObjAlignmentInBytes, r);
add(r, G6_heapbase, r);
if (Universe::narrow_oop_base() != NULL)
add(r, G6_heapbase, r);
}
void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
......@@ -4375,14 +4659,17 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
// pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
assert (UseCompressedOops, "must be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sllx(src, LogMinObjAlignmentInBytes, dst);
add(dst, G6_heapbase, dst);
if (Universe::narrow_oop_base() != NULL)
add(dst, G6_heapbase, dst);
}
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
// call indirectly to solve generation ordering problem
Address base(G6_heapbase, (address)Universe::heap_base_addr());
Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
load_ptr_contents(base, G6_heapbase);
}
}
......@@ -2327,6 +2327,46 @@ class MacroAssembler: public Assembler {
Register temp_reg, Register temp2_reg,
Label& no_such_interface);
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg and temp2_reg.
// If super_check_offset is not -1, temp2_reg is not used and can be noreg.
void check_klass_subtype_fast_path(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Label* L_success,
Label* L_failure,
Label* L_slow_path,
RegisterConstant super_check_offset = RegisterConstant(-1),
Register instanceof_hack = noreg);
// The rest of the type check; must be wired to a corresponding fast path.
// It does not repeat the fast path logic, so don't use it standalone.
// The temp_reg can be noreg, if no temps are available.
// It can also be sub_klass or super_klass, meaning it's OK to kill that one.
// Updates the sub's secondary super cache as necessary.
void check_klass_subtype_slow_path(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Register temp3_reg,
Register temp4_reg,
Label* L_success,
Label* L_failure);
// Simplified, combined version, good for typical uses.
// Falls through on failure.
void check_klass_subtype(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Label& L_success);
// Stack overflow checking
// Note: this clobbers G3_scratch
......
......@@ -2393,23 +2393,11 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// get instance klass
load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
// get super_check_offset
load(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), Rtmp1, T_INT, NULL);
// See if we get an immediate positive hit
__ ld_ptr(klass_RInfo, Rtmp1, FrameMap::O7_oop_opr->as_register());
__ cmp(k_RInfo, O7);
__ br(Assembler::equal, false, Assembler::pn, done);
__ delayed()->nop();
// check for immediate negative hit
__ cmp(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ br(Assembler::notEqual, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
// check for self
__ cmp(klass_RInfo, k_RInfo);
__ br(Assembler::equal, false, Assembler::pn, done);
__ delayed()->nop();
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
// assert(sub.is_same(FrameMap::G3_RInfo) && super.is_same(FrameMap::G1_RInfo), "incorrect call setup");
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ cmp(G3, 0);
......@@ -2493,58 +2481,30 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ delayed()->nop();
__ bind(done);
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
load(klass_RInfo, k->super_check_offset(), Rtmp1, T_OBJECT, NULL);
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
// See if we get an immediate positive hit
__ cmp(Rtmp1, k_RInfo );
__ br(Assembler::notEqual, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
} else {
// See if we get an immediate positive hit
assert_different_registers(Rtmp1, k_RInfo, klass_RInfo);
__ cmp(Rtmp1, k_RInfo );
__ br(Assembler::equal, false, Assembler::pn, done);
// check for self
__ delayed()->cmp(klass_RInfo, k_RInfo);
__ br(Assembler::equal, false, Assembler::pn, done);
__ delayed()->nop();
// assert(sub.is_same(FrameMap::G3_RInfo) && super.is_same(FrameMap::G1_RInfo), "incorrect call setup");
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ cmp(G3, 0);
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
}
__ bind(done);
if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
need_slow_path = false;
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
(need_slow_path ? &done : NULL),
stub->entry(), NULL,
RegisterConstant(k->super_check_offset()));
} else {
assert_different_registers(Rtmp1, klass_RInfo, k_RInfo);
load(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), Rtmp1, T_INT, NULL);
// See if we get an immediate positive hit
load(klass_RInfo, Rtmp1, FrameMap::O7_oop_opr, T_OBJECT);
__ cmp(k_RInfo, O7);
__ br(Assembler::equal, false, Assembler::pn, done);
__ delayed()->nop();
// check for immediate negative hit
__ cmp(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ br(Assembler::notEqual, false, Assembler::pn, *stub->entry());
// check for self
__ delayed()->cmp(klass_RInfo, k_RInfo);
__ br(Assembler::equal, false, Assembler::pn, done);
__ delayed()->nop();
// assert(sub.is_same(FrameMap::G3_RInfo) && super.is_same(FrameMap::G1_RInfo), "incorrect call setup");
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
&done, stub->entry(), NULL);
}
if (need_slow_path) {
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ cmp(G3, 0);
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
__ bind(done);
}
__ bind(done);
}
__ mov(obj, dst);
} else if (code == lir_instanceof) {
......@@ -2582,58 +2542,32 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ set(0, dst);
__ bind(done);
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
assert_different_registers(Rtmp1, klass_RInfo, k_RInfo);
load(klass_RInfo, k->super_check_offset(), Rtmp1, T_OBJECT, NULL);
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
// See if we get an immediate positive hit
__ cmp(Rtmp1, k_RInfo );
__ br(Assembler::equal, true, Assembler::pt, done);
__ delayed()->set(1, dst);
__ set(0, dst);
__ bind(done);
} else {
// See if we get an immediate positive hit
assert_different_registers(Rtmp1, k_RInfo, klass_RInfo);
__ cmp(Rtmp1, k_RInfo );
__ br(Assembler::equal, true, Assembler::pt, done);
__ delayed()->set(1, dst);
// check for self
__ cmp(klass_RInfo, k_RInfo);
__ br(Assembler::equal, true, Assembler::pt, done);
__ delayed()->set(1, dst);
// assert(sub.is_same(FrameMap::G3_RInfo) && super.is_same(FrameMap::G1_RInfo), "incorrect call setup");
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ mov(G3, dst);
__ bind(done);
}
if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
need_slow_path = false;
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
(need_slow_path ? &done : NULL),
(need_slow_path ? &done : NULL), NULL,
RegisterConstant(k->super_check_offset()),
dst);
} else {
assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
load(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), dst, T_INT, NULL);
// See if we get an immediate positive hit
load(klass_RInfo, dst, FrameMap::O7_oop_opr, T_OBJECT);
__ cmp(k_RInfo, O7);
__ br(Assembler::equal, true, Assembler::pt, done);
__ delayed()->set(1, dst);
// check for immediate negative hit
__ cmp(dst, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ br(Assembler::notEqual, true, Assembler::pt, done);
__ delayed()->set(0, dst);
// check for self
__ cmp(klass_RInfo, k_RInfo);
__ br(Assembler::equal, true, Assembler::pt, done);
__ delayed()->set(1, dst);
// assert(sub.is_same(FrameMap::G3_RInfo) && super.is_same(FrameMap::G1_RInfo), "incorrect call setup");
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
&done, &done, NULL,
RegisterConstant(-1),
dst);
}
if (need_slow_path) {
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ mov(G3, dst);
__ bind(done);
}
__ bind(done);
}
} else {
ShouldNotReachHere();
......
......@@ -714,38 +714,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// sub : G3, argument, destroyed
// super: G1, argument, not changed
// raddr: O7, blown by call
Label loop, miss;
Label miss;
__ save_frame(0); // Blow no registers!
__ ld_ptr( G3, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
__ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0); // length in l0
__ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1); // ptr into array
__ clr(L4); // Index
// Load a little early; will load 1 off the end of the array.
// Ok for now; revisit if we have other uses of this routine.
__ ld_ptr(L1,0,L2); // Will load a little early
// The scan loop
__ bind(loop);
__ add(L1,wordSize,L1); // Bump by OOP size
__ cmp(L4,L0);
__ br(Assembler::equal,false,Assembler::pn,miss);
__ delayed()->inc(L4); // Bump index
__ subcc(L2,G1,L3); // Check for match; zero in L3 for a hit
__ brx( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld_ptr(L1,0,L2); // Will load a little early
// Got a hit; report success; set cache
__ st_ptr( G1, G3, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
__ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);
__ mov(1, G3);
__ ret(); // Result in G5 is ok; flags set
__ ret(); // Result in G5 is 'true'
__ delayed()->restore(); // free copy or add can go here
__ bind(miss);
__ mov(0, G3);
__ ret(); // Result in G5 is ok; flags set
__ ret(); // Result in G5 is 'false'
__ delayed()->restore(); // free copy or add can go here
}
......
......@@ -866,65 +866,18 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Register Rtmp2,
Register Rtmp3,
Label &ok_is_subtype ) {
Label not_subtype, loop;
Label not_subtype;
// Profile the not-null value's klass.
profile_typecheck(Rsub_klass, Rtmp1);
// Load the super-klass's check offset into Rtmp1
ld( Rsuper_klass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), Rtmp1 );
// Load from the sub-klass's super-class display list, or a 1-word cache of
// the secondary superclass list, or a failing value with a sentinel offset
// if the super-klass is an interface or exceptionally deep in the Java
// hierarchy and we have to scan the secondary superclass list the hard way.
ld_ptr( Rsub_klass, Rtmp1, Rtmp2 );
// See if we get an immediate positive hit
cmp( Rtmp2, Rsuper_klass );
brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
// In the delay slot, check for immediate negative hit
delayed()->cmp( Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
br( Assembler::notEqual, false, Assembler::pt, not_subtype );
// In the delay slot, check for self
delayed()->cmp( Rsub_klass, Rsuper_klass );
brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
// Now do a linear scan of the secondary super-klass chain.
delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );
// compress superclass
if (UseCompressedOops) encode_heap_oop(Rsuper_klass);
// Rtmp2 holds the objArrayOop of secondary supers.
ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
// Check for empty secondary super list
tst(Rtmp1);
// Top of search loop
bind( loop );
br( Assembler::equal, false, Assembler::pn, not_subtype );
delayed()->nop();
check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
Rtmp1, Rtmp2,
&ok_is_subtype, &not_subtype, NULL);
// load next super to check
if (UseCompressedOops) {
lduw( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
// Bump array pointer forward one oop
add( Rtmp2, 4, Rtmp2 );
} else {
ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
// Bump array pointer forward one oop
add( Rtmp2, wordSize, Rtmp2);
}
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
cmp( Rtmp3, Rsuper_klass );
// A miss means we are NOT a subtype and need to keep looping
brx( Assembler::notEqual, false, Assembler::pt, loop );
delayed()->deccc( Rtmp1 ); // dec trip counter in delay slot
// Falling out the bottom means we found a hit; we ARE a subtype
if (UseCompressedOops) decode_heap_oop(Rsuper_klass);
br( Assembler::always, false, Assembler::pt, ok_is_subtype );
// Update the cache
delayed()->st_ptr( Rsuper_klass, Rsub_klass,
sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
&ok_is_subtype, NULL);
bind(not_subtype);
profile_typecheck_failed(Rtmp1);
......
......@@ -547,7 +547,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int klass_load_size;
if (UseCompressedOops) {
klass_load_size = 3*BytesPerInstWord; // see MacroAssembler::load_klass()
assert(Universe::heap() != NULL, "java heap should be initialized");
if (Universe::narrow_oop_base() == NULL)
klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
else
klass_load_size = 3*BytesPerInstWord;
} else {
klass_load_size = 1*BytesPerInstWord;
}
......@@ -1601,9 +1605,11 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
if (UseCompressedOops) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSLL R_G5,3,R_G5");
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
if (Universe::narrow_oop_base() != NULL)
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
} else {
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
}
......@@ -2502,7 +2508,11 @@ encode %{
__ load_klass(O0, G3_scratch);
int klass_load_size;
if (UseCompressedOops) {
klass_load_size = 3*BytesPerInstWord;
assert(Universe::heap() != NULL, "java heap should be initialized");
if (Universe::narrow_oop_base() == NULL)
klass_load_size = 2*BytesPerInstWord;
else
klass_load_size = 3*BytesPerInstWord;
} else {
klass_load_size = 1*BytesPerInstWord;
}
......@@ -9005,6 +9015,33 @@ instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, note
ins_pipe(long_memory_op);
%}
//---------- Population Count Instructions -------------------------------------
instruct popCountI(iRegI dst, iRegI src) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountI src));
format %{ "POPC $src, $dst" %}
ins_encode %{
__ popc($src$$Register, $dst$$Register);
%}
ins_pipe(ialu_reg);
%}
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegI dst, iRegL src) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountL src));
format %{ "POPC $src, $dst" %}
ins_encode %{
__ popc($src$$Register, $dst$$Register);
%}
ins_pipe(ialu_reg);
%}
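The two instruct forms above let the SPARC backend match the ideal PopCountI and PopCountL nodes to a single POPC instruction when UsePopCountInstruction is set. At the Java level these nodes come from the bit-count library methods (both of which return an int, per the note above popCountL); a quick sanity check under that assumption:

// Integer.bitCount / Long.bitCount are the library entry points C2 is
// expected to intrinsify into PopCountI / PopCountL nodes.
public class PopCountDemo {
    public static void main(String[] args) {
        System.out.println(Integer.bitCount(0xB));       // 3
        System.out.println(Long.bitCount(0xF0F0F0F0L));  // 16
    }
}

Whether POPC is actually emitted still depends on the CPU advertising support and the flag being enabled.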
// ============================================================================
//------------Bytes reverse--------------------------------------------------
......
......@@ -900,19 +900,7 @@ class StubGenerator: public StubCodeGenerator {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
address start = __ pc();
Label loop, miss;
// Compare super with sub directly, since super is not in its own SSA.
// The compiler used to emit this test, but we fold it in here,
// to increase overall code density, with no real loss of speed.
{ Label L;
__ cmp(O1, O2);
__ brx(Assembler::notEqual, false, Assembler::pt, L);
__ delayed()->nop();
__ retl();
__ delayed()->addcc(G0,0,O0); // set Z flags, zero result
__ bind(L);
}
Label miss;
#if defined(COMPILER2) && !defined(_LP64)
// Do not use a 'save' because it blows the 64-bit O registers.
......@@ -936,56 +924,12 @@ class StubGenerator: public StubCodeGenerator {
Register L2_super = L2;
Register L3_index = L3;
#ifdef _LP64
Register L4_ooptmp = L4;
if (UseCompressedOops) {
// this must be under UseCompressedOops check, as we rely upon fact
// that L4 not clobbered in C2 on 32-bit platforms, where we do explicit save
// on stack, see several lines above
__ encode_heap_oop(Rsuper, L4_ooptmp);
}
#endif
inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1);
__ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
__ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0_ary_len);
__ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1_ary_ptr);
__ clr(L3_index); // zero index
// Load a little early; will load 1 off the end of the array.
// Ok for now; revisit if we have other uses of this routine.
if (UseCompressedOops) {
__ lduw(L1_ary_ptr,0,L2_super);// Will load a little early
} else {
__ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
}
assert(heapOopSize != 0, "heapOopSize should be initialized");
// The scan loop
__ BIND(loop);
__ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size
__ cmp(L3_index,L0_ary_len);
__ br(Assembler::equal,false,Assembler::pn,miss);
__ delayed()->inc(L3_index); // Bump index
if (UseCompressedOops) {
#ifdef _LP64
__ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit
__ br( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->lduw(L1_ary_ptr,0,L2_super);// Will load a little early
#else
ShouldNotReachHere();
#endif
} else {
__ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
__ brx( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
}
__ check_klass_subtype_slow_path(Rsub, Rsuper,
L0, L1, L2, L3,
NULL, &miss);
// Got a hit; report success; set cache. Cache load doesn't
// happen here; for speed it is directly emitted by the compiler.
__ st_ptr( Rsuper, Rsub, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
// Match falls through here.
__ addcc(G0,0,Rret); // set Z flags, zero result
#if defined(COMPILER2) && !defined(_LP64)
__ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
......@@ -999,7 +943,6 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->restore();
#endif
// Hit or miss falls through here
__ BIND(miss);
__ addcc(G0,1,Rret); // set NZ flags, nonzero result
......@@ -2330,50 +2273,30 @@ class StubGenerator: public StubCodeGenerator {
Register super_check_offset,
Register super_klass,
Register temp,
Label& L_success,
Register deccc_hack = noreg) {
Label& L_success) {
assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
BLOCK_COMMENT("type_check:");
Label L_miss;
Label L_miss, L_pop_to_miss;
assert_clean_int(super_check_offset, temp);
// maybe decrement caller's trip count:
#define DELAY_SLOT delayed(); \
{ if (deccc_hack == noreg) __ nop(); else __ deccc(deccc_hack); }
// if the pointers are equal, we are done (e.g., String[] elements)
__ cmp(sub_klass, super_klass);
__ brx(Assembler::equal, true, Assembler::pt, L_success);
__ DELAY_SLOT;
// check the supertype display:
__ ld_ptr(sub_klass, super_check_offset, temp); // query the super type
__ cmp(super_klass, temp); // test the super type
__ brx(Assembler::equal, true, Assembler::pt, L_success);
__ DELAY_SLOT;
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
__ cmp(super_klass, sc_offset);
__ brx(Assembler::notEqual, true, Assembler::pt, L_miss);
__ delayed()->nop();
__ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
&L_success, &L_miss, NULL,
super_check_offset);
BLOCK_COMMENT("type_check_slow_path:");
__ save_frame(0);
__ mov(sub_klass->after_save(), O1);
// mov(super_klass->after_save(), O2); //fill delay slot
assert(StubRoutines::Sparc::_partial_subtype_check != NULL, "order of generation");
__ call(StubRoutines::Sparc::_partial_subtype_check);
__ delayed()->mov(super_klass->after_save(), O2);
__ restore();
// Upon return, the condition codes are already set.
__ brx(Assembler::equal, true, Assembler::pt, L_success);
__ DELAY_SLOT;
__ check_klass_subtype_slow_path(sub_klass->after_save(),
super_klass->after_save(),
L0, L1, L2, L4,
NULL, &L_pop_to_miss);
__ ba(false, L_success);
__ delayed()->restore();
#undef DELAY_SLOT
__ bind(L_pop_to_miss);
__ restore();
// Fall through on failure!
__ BIND(L_miss);
......@@ -2411,7 +2334,7 @@ class StubGenerator: public StubCodeGenerator {
gen_write_ref_array_pre_barrier(O1, O2);
#ifdef ASSERT
// We sometimes save a frame (see partial_subtype_check below).
// We sometimes save a frame (see generate_type_check below).
// If this will cause trouble, let's fail now instead of later.
__ save_frame(0);
__ restore();
......@@ -2455,41 +2378,39 @@ class StubGenerator: public StubCodeGenerator {
// G3, G4, G5 --- current oop, oop.klass, oop.klass.super
__ align(16);
__ bind(store_element);
// deccc(G1_remain); // decrement the count (hoisted)
__ BIND(store_element);
__ deccc(G1_remain); // decrement the count
__ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
__ inc(O5_offset, heapOopSize); // step to next offset
__ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
__ delayed()->set(0, O0); // return -1 on success
// ======== loop entry is here ========
__ bind(load_element);
__ BIND(load_element);
__ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
__ br_null(G3_oop, true, Assembler::pt, store_element);
__ delayed()->deccc(G1_remain); // decrement the count
__ delayed()->nop();
__ load_klass(G3_oop, G4_klass); // query the object klass
generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
// branch to this on success:
store_element,
// decrement this on success:
G1_remain);
store_element);
// ======== end loop ========
// It was a real error; we must depend on the caller to finish the job.
// Register G1 has number of *remaining* oops, O2 number of *total* oops.
// Emit GC store barriers for the oops we have copied (O2 minus G1),
// and report their number to the caller.
__ bind(fail);
__ BIND(fail);
__ subcc(O2_count, G1_remain, O2_count);
__ brx(Assembler::zero, false, Assembler::pt, done);
__ delayed()->not1(O2_count, O0); // report (-1^K) to caller
__ bind(do_card_marks);
__ BIND(do_card_marks);
gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
__ bind(done);
__ BIND(done);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
__ retl();
__ delayed()->nop(); // return value in O0
......@@ -2942,14 +2863,15 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
StubRoutines::_fence_entry = generate_fence();
#endif // COMPILER2 !=> _LP64
StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check();
}
void generate_all() {
// Generates all stubs and initializes the entry points
// Generate partial_subtype_check first here since its code depends on
// UseZeroBaseCompressedOops which is defined after heap initialization.
StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check();
// These entry points require SharedInfo::stack0 to be set up in non-core builds
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
......
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -72,6 +72,9 @@ void VM_Version::initialize() {
FLAG_SET_ERGO(bool, UseCompressedOops, false);
}
}
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2
// Indirect branch is the same cost as direct
......@@ -89,16 +92,26 @@ void VM_Version::initialize() {
#endif
}
// Use hardware population count instruction if available.
if (has_hardware_popc()) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
}
char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v8() ? ", has_v8" : ""),
(has_v9() ? ", has_v9" : ""),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", has_vis1" : ""),
(has_vis2() ? ", has_vis2" : ""),
(is_ultra3() ? ", is_ultra3" : ""),
(is_sun4v() ? ", is_sun4v" : ""),
(is_niagara1() ? ", is_niagara1" : ""),
(!has_hardware_int_muldiv() ? ", no-muldiv" : ""),
(is_niagara1_plus() ? ", is_niagara1_plus" : ""),
(!has_hardware_mul32() ? ", no-mul32" : ""),
(!has_hardware_div32() ? ", no-div32" : ""),
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
// buf is started with ", " or is empty
......
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -25,34 +25,38 @@
class VM_Version: public Abstract_VM_Version {
protected:
enum Feature_Flag {
v8_instructions = 0,
hardware_int_muldiv = 1,
hardware_fsmuld = 2,
v9_instructions = 3,
vis1_instructions = 4,
vis2_instructions = 5,
sun4v_instructions = 6
v8_instructions = 0,
hardware_mul32 = 1,
hardware_div32 = 2,
hardware_fsmuld = 3,
hardware_popc = 4,
v9_instructions = 5,
vis1_instructions = 6,
vis2_instructions = 7,
sun4v_instructions = 8
};
enum Feature_Flag_Set {
unknown_m = 0,
all_features_m = -1,
v8_instructions_m = 1 << v8_instructions,
hardware_int_muldiv_m = 1 << hardware_int_muldiv,
hardware_fsmuld_m = 1 << hardware_fsmuld,
v9_instructions_m = 1 << v9_instructions,
vis1_instructions_m = 1 << vis1_instructions,
vis2_instructions_m = 1 << vis2_instructions,
sun4v_m = 1 << sun4v_instructions,
generic_v8_m = v8_instructions_m | hardware_int_muldiv_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m | vis1_instructions_m,
ultra3_m = generic_v9_m | vis2_instructions_m,
unknown_m = 0,
all_features_m = -1,
v8_instructions_m = 1 << v8_instructions,
hardware_mul32_m = 1 << hardware_mul32,
hardware_div32_m = 1 << hardware_div32,
hardware_fsmuld_m = 1 << hardware_fsmuld,
hardware_popc_m = 1 << hardware_popc,
v9_instructions_m = 1 << v9_instructions,
vis1_instructions_m = 1 << vis1_instructions,
vis2_instructions_m = 1 << vis2_instructions,
sun4v_m = 1 << sun4v_instructions,
generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m,
ultra3_m = generic_v9_m | vis1_instructions_m | vis2_instructions_m,
// Temporary until we have something more accurate
niagara1_unique_m = sun4v_m,
niagara1_m = generic_v9_m | niagara1_unique_m
niagara1_unique_m = sun4v_m,
niagara1_m = generic_v9_m | niagara1_unique_m
};
static int _features;
......@@ -62,7 +66,7 @@ protected:
static int determine_features();
static int platform_features(int features);
static bool is_niagara1(int features) { return (features & niagara1_m) == niagara1_m; }
static bool is_niagara1(int features) { return (features & sun4v_m) != 0; }
static int maximum_niagara1_processor_count() { return 32; }
// Returns true if the platform is in the niagara line and
......@@ -76,8 +80,10 @@ public:
// Instruction support
static bool has_v8() { return (_features & v8_instructions_m) != 0; }
static bool has_v9() { return (_features & v9_instructions_m) != 0; }
static bool has_hardware_int_muldiv() { return (_features & hardware_int_muldiv_m) != 0; }
static bool has_hardware_mul32() { return (_features & hardware_mul32_m) != 0; }
static bool has_hardware_div32() { return (_features & hardware_div32_m) != 0; }
static bool has_hardware_fsmuld() { return (_features & hardware_fsmuld_m) != 0; }
static bool has_hardware_popc() { return (_features & hardware_popc_m) != 0; }
static bool has_vis1() { return (_features & vis1_instructions_m) != 0; }
static bool has_vis2() { return (_features & vis2_instructions_m) != 0; }
......
......@@ -221,13 +221,15 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
if (is_vtable_stub) {
// ld;ld;ld,jmp,nop
const int basic = 5*BytesPerInstWord +
// shift;add for load_klass
(UseCompressedOops ? 2*BytesPerInstWord : 0);
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedOops ?
((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
return basic + slop;
} else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
// shift;add for load_klass
(UseCompressedOops ? 2*BytesPerInstWord : 0);
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedOops ?
((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
return (basic + slop);
}
}
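The sizing comments above encode the same zero-base special case; as a sketch (4-byte SPARC instructions assumed, helper name hypothetical):

    // Extra bytes load_klass adds to a vtable/itable stub: shift only with a
    // zero narrow-oop base, shift + add otherwise, nothing without compression.
    static int load_klass_extra_bytes(bool compressed_oops, bool zero_base) {
      const int BytesPerInstWord = 4;
      if (!compressed_oops) return 0;
      return (zero_base ? 1 : 2) * BytesPerInstWord;
    }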
......
(This diff is collapsed.)
......@@ -578,20 +578,25 @@ private:
// These are all easily abused and hence protected
void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format = 0);
// 32BIT ONLY SECTION
#ifndef _LP64
// Make these disappear in 64bit mode since they would never be correct
void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
#else
// 64BIT ONLY SECTION
void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY
void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64
// These are unique in that we are ensured by the caller that the 32bit
......@@ -1219,6 +1224,14 @@ private:
void popq(Address dst);
#endif
void popcntl(Register dst, Address src);
void popcntl(Register dst, Register src);
#ifdef _LP64
void popcntq(Register dst, Address src);
void popcntq(Register dst, Register src);
#endif
// Prefetches (SSE, SSE2, 3DNOW only)
void prefetchnta(Address src);
......@@ -1647,6 +1660,9 @@ class MacroAssembler: public Assembler {
void decode_heap_oop_not_null(Register dst, Register src);
void set_narrow_oop(Register dst, jobject obj);
void set_narrow_oop(Address dst, jobject obj);
void cmp_narrow_oop(Register dst, jobject obj);
void cmp_narrow_oop(Address dst, jobject obj);
// if heap base register is used - reinit it with the correct value
void reinit_heapbase();
......@@ -1791,6 +1807,40 @@ class MacroAssembler: public Assembler {
Register scan_temp,
Label& no_such_interface);
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg.
void check_klass_subtype_fast_path(Register sub_klass,
Register super_klass,
Register temp_reg,
Label* L_success,
Label* L_failure,
Label* L_slow_path,
RegisterConstant super_check_offset = RegisterConstant(-1));
// The rest of the type check; must be wired to a corresponding fast path.
// It does not repeat the fast path logic, so don't use it standalone.
// The temp_reg and temp2_reg can be noreg, if no temps are available.
// Updates the sub's secondary super cache as necessary.
// If set_cond_codes, condition codes will be Z on success, NZ on failure.
void check_klass_subtype_slow_path(Register sub_klass,
Register super_klass,
Register temp_reg,
Register temp2_reg,
Label* L_success,
Label* L_failure,
bool set_cond_codes = false);
// Simplified, combined version, good for typical uses.
// Falls through on failure.
void check_klass_subtype(Register sub_klass,
Register super_klass,
Register temp_reg,
Label& L_success);
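An illustrative model of the two-phase protocol these declarations describe, using hypothetical standalone types rather than HotSpot's Klass:

    #include <cstddef>
    // Fast path: identity or a supertype-display hit decides immediately.
    // Slow path: linear scan of the secondary supers, caching on a hit.
    struct FakeKlass {
      FakeKlass*  display_entry;     // stands in for [sub + super_check_offset]
      FakeKlass** secondary_supers;  // NULL-terminated in this sketch
      FakeKlass*  secondary_cache;
    };
    static bool is_subtype_of(FakeKlass* sub, FakeKlass* super) {
      if (sub == super || sub->display_entry == super) return true;
      for (FakeKlass** p = sub->secondary_supers; *p != NULL; p++) {
        if (*p == super) { sub->secondary_cache = super; return true; }
      }
      return false;
    }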
//----
void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
......
......@@ -1598,18 +1598,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// get instance klass
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
// get super_check_offset
__ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
__ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
__ jcc(Assembler::equal, done);
// check for immediate negative hit
__ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, *stub->entry());
// check for self
__ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, done);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
......@@ -1735,17 +1726,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}
__ bind(done);
} else {
__ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
__ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
__ jcc(Assembler::equal, done);
// check for immediate negative hit
__ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, *stub->entry());
// check for self
__ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, done);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
......@@ -1821,23 +1804,15 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ pop(dst);
__ jmp(done);
}
} else {
#else
{ // YUCK
}
else // next block is unconditional if LP64:
#endif // LP64
{
assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
__ movl(dst, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
__ cmpptr(k_RInfo, Address(klass_RInfo, dst, Address::times_1));
__ jcc(Assembler::equal, one);
// check for immediate negative hit
__ cmpl(dst, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, zero);
// check for self
__ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, one);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
......
......@@ -1354,6 +1354,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
case slow_subtype_check_id:
{
// Typical calling sequence:
// __ push(klass_RInfo); // object klass or other subclass
// __ push(sup_k_RInfo); // array element klass or other superclass
// __ call(slow_subtype_check);
// Note that the subclass is pushed first, and is therefore deepest.
// Previous versions of this code reversed the names 'sub' and 'super'.
// This was operationally harmless but made the code unreadable.
enum layout {
rax_off, SLOT2(raxH_off)
rcx_off, SLOT2(rcxH_off)
......@@ -1361,9 +1368,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
rdi_off, SLOT2(rdiH_off)
// saved_rbp_off, SLOT2(saved_rbpH_off)
return_off, SLOT2(returnH_off)
sub_off, SLOT2(subH_off)
super_off, SLOT2(superH_off)
framesize
sup_k_off, SLOT2(sup_kH_off)
klass_off, SLOT2(superH_off)
framesize,
result_off = klass_off // deepest argument is also the return value
};
__ set_info("slow_subtype_check", dont_gc_arguments);
......@@ -1373,19 +1381,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ push(rax);
// This is called by pushing args and not with C abi
__ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super
__ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub
__ movptr(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
// since size is positive, movl does the right thing on 64 bit
__ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
__ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
__ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
Label miss;
__ repne_scan();
__ jcc(Assembler::notEqual, miss);
__ movptr(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
__ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result
__ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
// fallthrough on success:
__ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
__ pop(rax);
__ pop(rcx);
__ pop(rsi);
......@@ -1393,7 +1396,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ret(0);
__ bind(miss);
__ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
__ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
__ pop(rax);
__ pop(rcx);
__ pop(rsi);
......
......@@ -219,47 +219,16 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
// Resets EDI to locals. Register sub_klass cannot be any of the above.
void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &ok_is_subtype ) {
assert( Rsub_klass != rax, "rax, holds superklass" );
assert( Rsub_klass != rcx, "rcx holds 2ndary super array length" );
assert( Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr" );
Label not_subtype, loop;
assert( Rsub_klass != rcx, "used as a temp" );
assert( Rsub_klass != rdi, "used as a temp, restored from locals" );
// Profile the not-null value's klass.
profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi
// Load the super-klass's check offset into ECX
movl( rcx, Address(rax, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes() ) );
// Load from the sub-klass's super-class display list, or a 1-word cache of
// the secondary superclass list, or a failing value with a sentinel offset
// if the super-klass is an interface or exceptionally deep in the Java
// hierarchy and we have to scan the secondary superclass list the hard way.
// See if we get an immediate positive hit
cmpptr( rax, Address(Rsub_klass,rcx,Address::times_1) );
jcc( Assembler::equal,ok_is_subtype );
// Check for immediate negative hit
cmpl( rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
jcc( Assembler::notEqual, not_subtype );
// Check for self
cmpptr( Rsub_klass, rax );
jcc( Assembler::equal, ok_is_subtype );
// Now do a linear scan of the secondary super-klass chain.
movptr( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) );
// EDI holds the objArrayOop of secondary supers.
movl( rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));// Load the array length
// Skip to start of data; also clear Z flag in case ECX is zero
addptr( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) );
// Scan ECX words at [EDI] for occurrence of EAX
// Set NZ/Z based on last compare
repne_scan();
restore_locals(); // Restore EDI; Must not blow flags
// Not equal?
jcc( Assembler::notEqual, not_subtype );
// Must be equal but missed in cache. Update cache.
movptr( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax );
jmp( ok_is_subtype );
bind(not_subtype);
profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
// Do the check.
check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
// Profile the failure of the check.
profile_typecheck_failed(rcx); // blows rcx
}
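The removed scan relied on the REPNE SCASD idiom: compare EAX against dwords at [EDI], advancing EDI and decrementing ECX until a match or exhaustion. A plain C++ model of that loop (illustrative, not HotSpot code):

    #include <stdint.h>
    // Returning true corresponds to ZF=1 (match); false to ECX exhausted.
    static bool repne_scasd_model(const uint32_t* edi, uint32_t ecx,
                                  uint32_t eax) {
      while (ecx != 0) {
        bool hit = (*edi == eax);  // scasd: compare, then advance
        edi++; ecx--;
        if (hit) return true;
      }
      return false;
    }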
......
......@@ -232,65 +232,13 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
Label not_subtype, not_subtype_pop, loop;
// Profile the not-null value's klass.
profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi
// Load the super-klass's check offset into rcx
movl(rcx, Address(rax, sizeof(oopDesc) +
Klass::super_check_offset_offset_in_bytes()));
// Load from the sub-klass's super-class display list, or a 1-word
// cache of the secondary superclass list, or a failing value with a
// sentinel offset if the super-klass is an interface or
// exceptionally deep in the Java hierarchy and we have to scan the
// secondary superclass list the hard way. See if we get an
// immediate positive hit
cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1));
jcc(Assembler::equal,ok_is_subtype);
// Check for immediate negative hit
cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
jcc( Assembler::notEqual, not_subtype );
// Check for self
cmpptr(Rsub_klass, rax);
jcc(Assembler::equal, ok_is_subtype);
// Now do a linear scan of the secondary super-klass chain.
movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_supers_offset_in_bytes()));
// rdi holds the objArrayOop of secondary supers.
// Load the array length
movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data; also clear Z flag in case rcx is zero
addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
// This part is kind of tricky, as values in the supers array can be 32 or
// 64 bits wide, and values in objArrays are always stored encoded, so we
// need to encode the value before the repne scan
if (UseCompressedOops) {
push(rax);
encode_heap_oop(rax);
repne_scanl();
// Not equal?
jcc(Assembler::notEqual, not_subtype_pop);
// restore heap oop here for movq
pop(rax);
} else {
repne_scan();
jcc(Assembler::notEqual, not_subtype);
}
// Must be equal but missed in cache. Update cache.
movptr(Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()), rax);
jmp(ok_is_subtype);
bind(not_subtype_pop);
// restore heap oop here for miss
if (UseCompressedOops) pop(rax);
bind(not_subtype);
profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
// Do the check.
check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
// Profile the failure of the check.
profile_typecheck_failed(rcx); // blows rcx
}
......
......@@ -349,7 +349,7 @@ class SlowSignatureHandler
if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
*_fp_identifiers |= (0x01 << (_num_args*2)); // mark as float
*_fp_identifiers |= (intptr_t)(0x01 << (_num_args*2)); // mark as float
_num_args++;
} else {
*_to++ = from_obj;
......@@ -364,7 +364,7 @@ class SlowSignatureHandler
if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
*_fp_identifiers |= (0x3 << (_num_args*2)); // mark as double
*_fp_identifiers |= (intptr_t)(0x3 << (_num_args*2)); // mark as double
_num_args++;
} else {
*_to++ = from_obj;
......
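The widened casts above keep the mark computation at pointer width; the encoding itself is two bits per argument slot. A hypothetical helper showing the layout:

    #include <stdint.h>
    // 2-bit tag per argument: 01 = float, 11 = double, 00 = integral/pointer.
    static intptr_t mark_fp_arg(intptr_t ids, int arg_index, bool is_double) {
      return ids | ((intptr_t)(is_double ? 0x3 : 0x1) << (arg_index * 2));
    }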
......@@ -1310,81 +1310,51 @@ class StubGenerator: public StubCodeGenerator {
Address& super_check_offset_addr,
Address& super_klass_addr,
Register temp,
Label* L_success_ptr, Label* L_failure_ptr) {
Label* L_success, Label* L_failure) {
BLOCK_COMMENT("type_check:");
Label L_fallthrough;
bool fall_through_on_success = (L_success_ptr == NULL);
if (fall_through_on_success) {
L_success_ptr = &L_fallthrough;
} else {
L_failure_ptr = &L_fallthrough;
}
Label& L_success = *L_success_ptr;
Label& L_failure = *L_failure_ptr;
#define LOCAL_JCC(assembler_con, label_ptr) \
if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \
else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
// The following is a strange variation of the fast path which requires
// one less register, because needed values are on the argument stack.
// __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
// L_success, L_failure, NULL);
assert_different_registers(sub_klass, temp);
// a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_supers_offset_in_bytes());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
Address secondary_supers_addr(sub_klass, ss_offset);
Address super_cache_addr( sub_klass, sc_offset);
// if the pointers are equal, we are done (e.g., String[] elements)
__ cmpptr(sub_klass, super_klass_addr);
__ jcc(Assembler::equal, L_success);
LOCAL_JCC(Assembler::equal, L_success);
// check the supertype display:
__ movl2ptr(temp, super_check_offset_addr);
Address super_check_addr(sub_klass, temp, Address::times_1, 0);
__ movptr(temp, super_check_addr); // load displayed supertype
__ cmpptr(temp, super_klass_addr); // test the super type
__ jcc(Assembler::equal, L_success);
LOCAL_JCC(Assembler::equal, L_success);
// if it was a primary super, we can just fail immediately
__ cmpl(super_check_offset_addr, sc_offset);
__ jcc(Assembler::notEqual, L_failure);
LOCAL_JCC(Assembler::notEqual, L_failure);
// Now do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
inc_counter_np(SharedRuntime::_partial_subtype_ctr);
{
// The repne_scan instruction uses fixed registers, which we must spill.
// (We need a couple more temps in any case.)
__ push(rax);
__ push(rcx);
__ push(rdi);
assert_different_registers(sub_klass, rax, rcx, rdi);
__ movptr(rdi, secondary_supers_addr);
// Load the array length.
__ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data.
__ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [edi] for occurrence of rax,
// Set NZ/Z based on last compare
__ movptr(rax, super_klass_addr);
__ repne_scan();
// Unspill the temp. registers:
__ pop(rdi);
__ pop(rcx);
__ pop(rax);
}
__ jcc(Assembler::notEqual, L_failure);
// The repne_scan instruction uses fixed registers, which will get spilled.
// We happen to know this works best when super_klass is in rax.
Register super_klass = temp;
__ movptr(super_klass, super_klass_addr);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
L_success, L_failure);
// Success. Cache the super we found and proceed in triumph.
__ movptr(temp, super_klass_addr); // note: rax, is dead
__ movptr(super_cache_addr, temp);
__ bind(L_fallthrough);
if (!fall_through_on_success)
__ jmp(L_success);
if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }
// Fall through on failure!
__ bind(L_fallthrough);
#undef LOCAL_JCC
}
//
......
......@@ -2091,66 +2091,9 @@ class StubGenerator: public StubCodeGenerator {
Label L_miss;
// a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_supers_offset_in_bytes());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
Address secondary_supers_addr(sub_klass, ss_offset);
Address super_cache_addr( sub_klass, sc_offset);
// if the pointers are equal, we are done (e.g., String[] elements)
__ cmpptr(super_klass, sub_klass);
__ jcc(Assembler::equal, L_success);
// check the supertype display:
Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
__ cmpptr(super_klass, super_check_addr); // test the super type
__ jcc(Assembler::equal, L_success);
// if it was a primary super, we can just fail immediately
__ cmpl(super_check_offset, sc_offset);
__ jcc(Assembler::notEqual, L_miss);
// Now do a linear scan of the secondary super-klass chain.
// The repne_scan instruction uses fixed registers, which we must spill.
// (We need a couple more temps in any case.)
// This code is rarely used, so simplicity is a virtue here.
inc_counter_np(SharedRuntime::_partial_subtype_ctr);
{
__ push(rax);
__ push(rcx);
__ push(rdi);
assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
__ movptr(rdi, secondary_supers_addr);
// Load the array length.
__ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data.
__ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
__ movptr(rax, super_klass);
if (UseCompressedOops) {
// Compare against the compressed form. No need to uncompress, because
// the original rax is restored by the pop below.
__ encode_heap_oop(rax);
__ repne_scanl();
} else {
__ repne_scan();
}
// Unspill the temp. registers:
__ pop(rdi);
__ pop(rcx);
__ pop(rax);
__ jcc(Assembler::notEqual, L_miss);
}
// Success. Cache the super we found and proceed in triumph.
__ movptr(super_cache_addr, super_klass); // note: rax is dead
__ jmp(L_success);
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
// Fall through on failure!
__ BIND(L_miss);
......
......@@ -284,7 +284,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
......@@ -297,6 +297,7 @@ void VM_Version::get_processor_features() {
(supports_ssse3()? ", ssse3": ""),
(supports_sse4_1() ? ", sse4.1" : ""),
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_popcnt() ? ", popcnt" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow() ? ", 3dnow" : ""),
(supports_3dnow2() ? ", 3dnowext" : ""),
......@@ -410,6 +411,13 @@ void VM_Version::get_processor_features() {
}
}
// Use population count instruction if available.
if (supports_popcnt()) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
}
assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");
......
......@@ -70,7 +70,9 @@ public:
dca : 1,
sse4_1 : 1,
sse4_2 : 1,
: 11;
: 2,
popcnt : 1,
: 8;
} bits;
};
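With the layout above (dca = bit 18, sse4_1 = 19, sse4_2 = 20, two reserved bits), the new popcnt field lands on bit 23 of CPUID leaf 1 ECX, matching Intel's POPCNT feature bit. A standalone probe, assuming GCC/Clang inline asm on x86 (illustrative):

    #include <stdint.h>
    static bool cpu_supports_popcnt() {
      uint32_t eax = 1, ebx, ecx, edx;
      __asm__ __volatile__("cpuid"
                           : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
      return (ecx & (1u << 23)) != 0;  // CPUID.01H:ECX.POPCNT[bit 23]
    }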
......@@ -179,7 +181,8 @@ protected:
CPU_SSSE3 = (1 << 9),
CPU_SSE4A = (1 << 10),
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12)
CPU_SSE4_2 = (1 << 12),
CPU_POPCNT = (1 << 13)
} cpuFeatureFlags;
// cpuid information block. All info derived from executing cpuid with
......@@ -290,6 +293,8 @@ protected:
result |= CPU_SSE4_1;
if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
result |= CPU_SSE4_2;
if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
result |= CPU_POPCNT;
return result;
}
......@@ -379,6 +384,7 @@ public:
static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; }
static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
static bool supports_popcnt() { return (_cpuFeatures & CPU_POPCNT) != 0; }
//
// AMD features
//
......
......@@ -1483,16 +1483,20 @@ encode %{
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
enc_class OpcP %{ // Emit opcode
emit_opcode(cbuf,$primary);
// Emit primary opcode
enc_class OpcP %{
emit_opcode(cbuf, $primary);
%}
enc_class OpcS %{ // Emit opcode
emit_opcode(cbuf,$secondary);
// Emit secondary opcode
enc_class OpcS %{
emit_opcode(cbuf, $secondary);
%}
enc_class Opcode(immI d8 ) %{ // Emit opcode
emit_opcode(cbuf,$d8$$constant);
// Emit opcode directly
enc_class Opcode(immI d8) %{
emit_opcode(cbuf, $d8$$constant);
%}
enc_class SizePrefix %{
......@@ -1688,26 +1692,15 @@ encode %{
Register Reax = as_Register(EAX_enc); // super class
Register Recx = as_Register(ECX_enc); // killed
Register Resi = as_Register(ESI_enc); // sub class
Label hit, miss;
Label miss;
MacroAssembler _masm(&cbuf);
// Compare super with sub directly, since super is not in its own SSA.
// The compiler used to emit this test, but we fold it in here,
// to allow platform-specific tweaking on sparc.
__ cmpptr(Reax, Resi);
__ jcc(Assembler::equal, hit);
#ifndef PRODUCT
__ incrementl(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
#endif //PRODUCT
__ movptr(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
__ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes()));
__ addptr(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ repne_scan();
__ jcc(Assembler::notEqual, miss);
__ movptr(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax);
__ bind(hit);
if( $primary )
__ xorptr(Redi,Redi);
__ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
NULL, &miss,
/*set_cond_codes:*/ true);
if ($primary) {
__ xorptr(Redi, Redi);
}
__ bind(miss);
%}
......@@ -6387,6 +6380,67 @@ instruct bytes_reverse_long(eRegL dst) %{
%}
//---------- Population Count Instructions -------------------------------------
instruct popCountI(eRegI dst, eRegI src) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountI src));
format %{ "POPCNT $dst, $src" %}
ins_encode %{
__ popcntl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct popCountI_mem(eRegI dst, memory mem) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountI (LoadI mem)));
format %{ "POPCNT $dst, $mem" %}
ins_encode %{
__ popcntl($dst$$Register, $mem$$Address);
%}
ins_pipe(ialu_reg);
%}
// Note: Long.bitCount(long) returns an int.
instruct popCountL(eRegI dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountL src));
effect(KILL cr, TEMP tmp, TEMP dst);
format %{ "POPCNT $dst, $src.lo\n\t"
"POPCNT $tmp, $src.hi\n\t"
"ADD $dst, $tmp" %}
ins_encode %{
__ popcntl($dst$$Register, $src$$Register);
__ popcntl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
__ addl($dst$$Register, $tmp$$Register);
%}
ins_pipe(ialu_reg);
%}
// Note: Long.bitCount(long) returns an int.
instruct popCountL_mem(eRegI dst, memory mem, eRegI tmp, eFlagsReg cr) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountL (LoadL mem)));
effect(KILL cr, TEMP tmp, TEMP dst);
format %{ "POPCNT $dst, $mem\n\t"
"POPCNT $tmp, $mem+4\n\t"
"ADD $dst, $tmp" %}
ins_encode %{
//__ popcntl($dst$$Register, $mem$$Address$$first);
//__ popcntl($tmp$$Register, $mem$$Address$$second);
__ popcntl($dst$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, false));
__ popcntl($tmp$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, false));
__ addl($dst$$Register, $tmp$$Register);
%}
ins_pipe(ialu_reg);
%}
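Because 32-bit x86 has no 64-bit POPCNT, the two instructs above split the long: bitCount(x) = popcnt(lo32) + popcnt(hi32). The same computation in plain C++ (GCC builtin used for illustration):

    #include <stdint.h>
    static int long_bit_count(uint64_t x) {
      int lo = __builtin_popcount((uint32_t)x);          // POPCNT $dst, $src.lo
      int hi = __builtin_popcount((uint32_t)(x >> 32));  // POPCNT $tmp, $src.hi
      return lo + hi;                                    // ADD $dst, $tmp
    }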
//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
......@@ -12501,15 +12555,12 @@ instruct partialSubtypeCheck( eDIRegP result, eSIRegP sub, eAXRegP super, eCXReg
effect( KILL rcx, KILL cr );
ins_cost(1100); // slightly larger than the next version
format %{ "CMPL EAX,ESI\n\t"
"JEQ,s hit\n\t"
"MOV EDI,[$sub+Klass::secondary_supers]\n\t"
format %{ "MOV EDI,[$sub+Klass::secondary_supers]\n\t"
"MOV ECX,[EDI+arrayKlass::length]\t# length to scan\n\t"
"ADD EDI,arrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
"REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
"JNE,s miss\t\t# Missed: EDI not-zero\n\t"
"MOV [$sub+Klass::secondary_super_cache],$super\t# Hit: update cache\n\t"
"hit:\n\t"
"XOR $result,$result\t\t Hit: EDI zero\n\t"
"miss:\t" %}
......@@ -12523,9 +12574,7 @@ instruct partialSubtypeCheck_vs_Zero( eFlagsReg cr, eSIRegP sub, eAXRegP super,
effect( KILL rcx, KILL result );
ins_cost(1000);
format %{ "CMPL EAX,ESI\n\t"
"JEQ,s miss\t# Actually a hit; we are done.\n\t"
"MOV EDI,[$sub+Klass::secondary_supers]\n\t"
format %{ "MOV EDI,[$sub+Klass::secondary_supers]\n\t"
"MOV ECX,[EDI+arrayKlass::length]\t# length to scan\n\t"
"ADD EDI,arrayKlass::base_offset\t# Skip to start of data; set NZ in case count is zero\n\t"
"REPNE SCASD\t# Scan *EDI++ for a match with EAX while CX-- != 0\n\t"
......
(This diff is collapsed.)
......@@ -2269,15 +2269,16 @@ void linux_wrap_code(char* base, size_t size) {
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
bool os::commit_memory(char* addr, size_t size) {
uintptr_t res = (uintptr_t) ::mmap(addr, size,
PROT_READ|PROT_WRITE|PROT_EXEC,
bool os::commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
}
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
return commit_memory(addr, size);
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
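A minimal standalone version of the new commit path (POSIX mmap; hypothetical helper, not the HotSpot function): pages are committed read/write by default and gain PROT_EXEC only when the caller asks for it:

    #include <stddef.h>
    #include <sys/mman.h>
    static bool commit_pages(char* addr, size_t size, bool exec) {
      int prot = exec ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                      : (PROT_READ | PROT_WRITE);
      return mmap(addr, size, prot,
                  MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) != MAP_FAILED;
    }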
......@@ -2417,8 +2418,7 @@ os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
unsigned long* os::Linux::_numa_all_nodes;
bool os::uncommit_memory(char* addr, size_t size) {
return ::mmap(addr, size,
PROT_READ|PROT_WRITE|PROT_EXEC,
return ::mmap(addr, size, PROT_NONE,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0)
!= MAP_FAILED;
}
......@@ -2441,7 +2441,9 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
flags |= MAP_FIXED;
}
addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE|PROT_EXEC,
// Map uncommitted pages PROT_READ and PROT_WRITE; access is changed to
// include PROT_EXEC when the page is committed, if it must be executable.
addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
flags, -1, 0);
if (addr != MAP_FAILED) {
......@@ -2582,7 +2584,9 @@ bool os::large_page_init() {
#define SHM_HUGETLB 04000
#endif
char* os::reserve_memory_special(size_t bytes) {
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages, "only for large pages");
key_t key = IPC_PRIVATE;
......
......@@ -249,6 +249,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
printf("\n");
GEN_OFFS(NarrowOopStruct, _base);
GEN_OFFS(NarrowOopStruct, _shift);
printf("\n");
GEN_VALUE(SIZE_HeapBlockHeader, sizeof(HeapBlock::Header));
GEN_SIZE(oopDesc);
GEN_SIZE(constantPoolOopDesc);
......
......@@ -46,7 +46,10 @@ extern pointer __JvmOffsets;
extern pointer __1cJCodeCacheF_heap_;
extern pointer __1cIUniverseP_methodKlassObj_;
extern pointer __1cIUniverseO_collectedHeap_;
extern pointer __1cIUniverseK_heap_base_;
extern pointer __1cIUniverseL_narrow_oop_;
#ifdef _LP64
extern pointer UseCompressedOops;
#endif
extern pointer __1cHnmethodG__vtbl_;
extern pointer __1cKBufferBlobG__vtbl_;
......@@ -56,6 +59,7 @@ extern pointer __1cKBufferBlobG__vtbl_;
#define copyin_uint16(ADDR) *(uint16_t*) copyin((pointer) (ADDR), sizeof(uint16_t))
#define copyin_uint32(ADDR) *(uint32_t*) copyin((pointer) (ADDR), sizeof(uint32_t))
#define copyin_int32(ADDR) *(int32_t*) copyin((pointer) (ADDR), sizeof(int32_t))
#define copyin_uint8(ADDR) *(uint8_t*) copyin((pointer) (ADDR), sizeof(uint8_t))
#define SAME(x) x
#define copyin_offset(JVM_CONST) JVM_CONST = \
......@@ -132,6 +136,9 @@ dtrace:helper:ustack:
copyin_offset(SIZE_oopDesc);
copyin_offset(SIZE_constantPoolOopDesc);
copyin_offset(OFFSET_NarrowOopStruct_base);
copyin_offset(OFFSET_NarrowOopStruct_shift);
/*
* The PC to translate is in arg0.
*/
......@@ -151,9 +158,19 @@ dtrace:helper:ustack:
this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_);
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_);
/* Reading volatile values */
#ifdef _LP64
this->Use_Compressed_Oops = copyin_uint8(&``UseCompressedOops);
#else
this->Use_Compressed_Oops = 0;
#endif
this->Universe_narrow_oop_base = copyin_ptr(&``__1cIUniverseL_narrow_oop_ +
OFFSET_NarrowOopStruct_base);
this->Universe_narrow_oop_shift = copyin_int32(&``__1cIUniverseL_narrow_oop_ +
OFFSET_NarrowOopStruct_shift);
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
......@@ -295,7 +312,7 @@ dtrace:helper:ustack:
dtrace:helper:ustack:
/!this->done && this->vtbl == this->BufferBlob_vtbl &&
this->Universe_heap_base == NULL &&
this->Use_Compressed_Oops == 0 &&
this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
{
MARK_LINE;
......@@ -306,7 +323,7 @@ this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
dtrace:helper:ustack:
/!this->done && this->vtbl == this->BufferBlob_vtbl &&
this->Universe_heap_base != NULL &&
this->Use_Compressed_Oops != 0 &&
this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
{
MARK_LINE;
......@@ -314,8 +331,8 @@ this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
* Read compressed pointer and decode heap oop, same as oop.inline.hpp
*/
this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata);
this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base +
((uintptr_t)this->cklass << 3));
this->klass = (uint64_t)((uintptr_t)this->Universe_narrow_oop_base +
((uintptr_t)this->cklass << this->Universe_narrow_oop_shift));
this->methodOop = this->klass == this->Universe_methodKlassOop;
this->done = !this->methodOop;
}
......
......@@ -146,13 +146,17 @@ struct jvm_agent {
uint64_t BufferBlob_vtbl;
uint64_t RuntimeStub_vtbl;
uint64_t Use_Compressed_Oops_address;
uint64_t Universe_methodKlassObj_address;
uint64_t Universe_narrow_oop_base_address;
uint64_t Universe_narrow_oop_shift_address;
uint64_t CodeCache_heap_address;
uint64_t Universe_heap_base_address;
/* Volatiles */
uint8_t Use_Compressed_Oops;
uint64_t Universe_methodKlassObj;
uint64_t Universe_heap_base;
uint64_t Universe_narrow_oop_base;
uint32_t Universe_narrow_oop_shift;
uint64_t CodeCache_low;
uint64_t CodeCache_high;
uint64_t CodeCache_segmap_low;
......@@ -279,8 +283,11 @@ static int parse_vmstructs(jvm_agent_t* J) {
if (strcmp("_methodKlassObj", vmp->fieldName) == 0) {
J->Universe_methodKlassObj_address = vmp->address;
}
if (strcmp("_heap_base", vmp->fieldName) == 0) {
J->Universe_heap_base_address = vmp->address;
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
J->Universe_narrow_oop_base_address = vmp->address;
}
if (strcmp("_narrow_oop._shift", vmp->fieldName) == 0) {
J->Universe_narrow_oop_shift_address = vmp->address;
}
}
CHECK_FAIL(err);
......@@ -298,14 +305,39 @@ static int parse_vmstructs(jvm_agent_t* J) {
return -1;
}
static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
psaddr_t sym_addr;
int err;
err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
if (err != PS_OK) goto fail;
*valuep = sym_addr;
return PS_OK;
fail:
return err;
}
static int read_volatiles(jvm_agent_t* J) {
uint64_t ptr;
int err;
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
if (err == PS_OK) {
err = ps_pread(J->P, J->Use_Compressed_Oops_address, &J->Use_Compressed_Oops, sizeof(uint8_t));
CHECK_FAIL(err);
} else {
J->Use_Compressed_Oops = 0;
}
err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj);
CHECK_FAIL(err);
err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base);
err = read_pointer(J, J->Universe_narrow_oop_base_address, &J->Universe_narrow_oop_base);
CHECK_FAIL(err);
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->CodeCache_low);
CHECK_FAIL(err);
......@@ -374,19 +406,6 @@ static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
return -1;
}
static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
psaddr_t sym_addr;
int err;
err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
if (err != PS_OK) goto fail;
*valuep = sym_addr;
return PS_OK;
fail:
return err;
}
static int find_jlong_constant(jvm_agent_t* J, const char *name, uint64_t* valuep) {
psaddr_t sym_addr;
int err = ps_pglobal_lookup(J->P, LIBJVM_SO, name, &sym_addr);
......@@ -458,14 +477,14 @@ void Jagent_destroy(jvm_agent_t *J) {
static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) {
uint64_t klass;
int err;
// If heap_base is nonnull, this was a compressed oop.
if (J->Universe_heap_base != NULL) {
// If UseCompressedOops, this was a compressed oop.
if (J->Use_Compressed_Oops != 0) {
uint32_t cklass;
err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata,
&cklass);
// decode heap oop, same as oop.inline.hpp
klass = (uint64_t)((uintptr_t)J->Universe_heap_base +
((uintptr_t)cklass << 3));
klass = (uint64_t)((uintptr_t)J->Universe_narrow_oop_base +
((uintptr_t)cklass << J->Universe_narrow_oop_shift));
} else {
err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass);
}
......
......@@ -2623,15 +2623,16 @@ int os::vm_allocation_granularity() {
return page_size;
}
bool os::commit_memory(char* addr, size_t bytes) {
bool os::commit_memory(char* addr, size_t bytes, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
size_t size = bytes;
return
NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED,
PROT_READ | PROT_WRITE | PROT_EXEC);
NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
}
bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint) {
if (commit_memory(addr, bytes)) {
bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
bool exec) {
if (commit_memory(addr, bytes, exec)) {
if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
// If the large page size has been set and the VM
// is using large pages, use the large page size
......@@ -3220,7 +3221,9 @@ bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
return true;
}
char* os::reserve_memory_special(size_t bytes) {
char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages && UseISM, "only for ISM large pages");
size_t size = bytes;
......@@ -4451,6 +4454,9 @@ int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;
// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
......@@ -4465,16 +4471,19 @@ os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
static address resolve_symbol(const char *name) {
address addr;
addr = (address) dlsym(RTLD_DEFAULT, name);
static address resolve_symbol_lazy(const char* name) {
address addr = (address) dlsym(RTLD_DEFAULT, name);
if(addr == NULL) {
// RTLD_DEFAULT was not defined on some early versions of 2.5.1
addr = (address) dlsym(RTLD_NEXT, name);
if(addr == NULL) {
fatal(dlerror());
}
}
return addr;
}
static address resolve_symbol(const char* name) {
address addr = resolve_symbol_lazy(name);
if(addr == NULL) {
fatal(dlerror());
}
return addr;
}
......@@ -4673,15 +4682,26 @@ bool os::Solaris::liblgrp_init() {
}
void os::Solaris::misc_sym_init() {
address func = (address)dlsym(RTLD_DEFAULT, "meminfo");
if(func == NULL) {
func = (address) dlsym(RTLD_NEXT, "meminfo");
address func;
// getisax
func = resolve_symbol_lazy("getisax");
if (func != NULL) {
os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
}
// meminfo
func = resolve_symbol_lazy("meminfo");
if (func != NULL) {
os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
}
}
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
assert(_getisax != NULL, "_getisax not set");
return _getisax(array, n);
}
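A hedged example of probing a capability through the new wrapper; AV_SPARC_POPC comes from Solaris <sys/auxv_SPARC.h>, and the single-element array is illustrative:

    uint32_t av = 0;
    if (os::Solaris::supports_getisax()) {
      (void) os::Solaris::getisax(&av, 1);
      bool has_popc = (av & AV_SPARC_POPC) != 0;  // hardware population count
    }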
// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
......@@ -4716,6 +4736,10 @@ void os::init(void) {
Solaris::initialize_system_info();
// Initialize misc. symbols as soon as possible, so we can use them
// if we need them.
Solaris::misc_sym_init();
int fd = open("/dev/zero", O_RDWR);
if (fd < 0) {
fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
......@@ -4857,7 +4881,6 @@ jint os::init_2(void) {
}
}
Solaris::misc_sym_init();
Solaris::signal_sets_init();
Solaris::init_signal_mem();
Solaris::install_signal_handlers();
......
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -72,6 +72,8 @@ class Solaris {
LGRP_VIEW_OS /* what's available to operating system */
} lgrp_view_t;
typedef uint_t (*getisax_func_t)(uint32_t* array, uint_t n);
typedef lgrp_id_t (*lgrp_home_func_t)(idtype_t idtype, id_t id);
typedef lgrp_cookie_t (*lgrp_init_func_t)(lgrp_view_t view);
typedef int (*lgrp_fini_func_t)(lgrp_cookie_t cookie);
......@@ -87,6 +89,8 @@ class Solaris {
const uint_t info_req[], int info_count,
uint64_t outdata[], uint_t validity[]);
static getisax_func_t _getisax;
static lgrp_home_func_t _lgrp_home;
static lgrp_init_func_t _lgrp_init;
static lgrp_fini_func_t _lgrp_fini;
......@@ -283,6 +287,9 @@ class Solaris {
}
static lgrp_cookie_t lgrp_cookie() { return _lgrp_cookie; }
static bool supports_getisax() { return _getisax != NULL; }
static uint_t getisax(uint32_t* array, uint_t n);
static void set_meminfo(meminfo_func_t func) { _meminfo = func; }
static int meminfo (const uint64_t inaddr[], int addr_count,
const uint_t info_req[], int info_count,
......
......@@ -2189,7 +2189,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory( (char *)addr, thread->stack_base() - addr );
os::commit_memory((char *)addr, thread->stack_base() - addr,
false );
return EXCEPTION_CONTINUE_EXECUTION;
}
else
......@@ -2565,8 +2566,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
assert(res == NULL || addr == NULL || addr == res,
"Unexpected address from reserve.");
return res;
......@@ -2595,7 +2595,7 @@ bool os::can_execute_large_page_memory() {
return true;
}
char* os::reserve_memory_special(size_t bytes) {
char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
if (UseLargePagesIndividualAllocation) {
if (TracePageSizes && Verbose) {
......@@ -2615,10 +2615,10 @@ char* os::reserve_memory_special(size_t bytes) {
"use -XX:-UseLargePagesIndividualAllocation to turn off");
return NULL;
}
p_buf = (char *) VirtualAlloc(NULL,
p_buf = (char *) VirtualAlloc(addr,
size_of_reserve, // size of Reserve
MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
......@@ -2659,7 +2659,13 @@ char* os::reserve_memory_special(size_t bytes) {
p_new = (char *) VirtualAlloc(next_alloc_addr,
bytes_to_rq,
MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
PAGE_EXECUTE_READWRITE);
PAGE_READWRITE);
if (p_new != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(next_alloc_addr, bytes_to_rq,
PAGE_EXECUTE_READWRITE, &oldprot);
}
}
if (p_new == NULL) {
......@@ -2688,10 +2694,12 @@ char* os::reserve_memory_special(size_t bytes) {
} else {
// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL,
bytes,
flag,
PAGE_EXECUTE_READWRITE);
char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE);
if (res != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(res, bytes, PAGE_EXECUTE_READWRITE, &oldprot);
}
return res;
}
}
......@@ -2703,7 +2711,7 @@ bool os::release_memory_special(char* base, size_t bytes) {
void os::print_statistics() {
}
bool os::commit_memory(char* addr, size_t bytes) {
bool os::commit_memory(char* addr, size_t bytes, bool exec) {
if (bytes == 0) {
// Don't bother the OS with noops.
return true;
......@@ -2712,11 +2720,19 @@ bool os::commit_memory(char* addr, size_t bytes) {
assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
// Don't attempt to print anything if the OS call fails. We're
// probably low on resources, so the print itself may cause crashes.
return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL;
bool result = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) != 0;
if (result && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
} else {
return result;
}
}
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
return commit_memory(addr, size);
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
}
bool os::uncommit_memory(char* addr, size_t bytes) {
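The recurring change in these Windows hunks is to stop requesting PAGE_EXECUTE_READWRITE up front: memory is reserved and committed PAGE_READWRITE, and execute permission is added with VirtualProtect only when the caller asks for it. A minimal sketch of that commit-then-protect pattern, not the actual os::commit_memory implementation:

#include <windows.h>
#include <cstdio>

static bool commit(void* addr, size_t bytes, bool exec) {
  // Commit read/write first; grant execute rights separately on request.
  if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL)
    return false;
  if (!exec) return true;
  DWORD oldprot;
  // Per the Windows docs, VirtualProtect is how execute permission is added.
  return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
}

int main() {
  void* p = VirtualAlloc(NULL, 4096, MEM_RESERVE, PAGE_READWRITE);
  bool ok = (p != NULL) && commit(p, 4096, true);
  std::printf("commit+exec: %s\n", ok ? "ok" : "failed");
  if (p != NULL) VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}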
......@@ -2750,7 +2766,7 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
// Strangely enough, on Win32 one can change protection only for committed
// memory; not a big deal anyway, as bytes is at most 64K here
if (!is_committed && !commit_memory(addr, bytes)) {
if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
fatal("cannot commit protection page");
}
// One cannot use os::guard_memory() here, as on Win32 guard page
......@@ -3248,10 +3264,10 @@ jint os::init_2(void) {
#endif
if (!UseMembar) {
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_EXECUTE_READWRITE);
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_EXECUTE_READWRITE);
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
guarantee( return_page != NULL, "Commit Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
......
......@@ -30,5 +30,7 @@
define_pd_global(uintx, JVMInvokeMethodSlack, 12288);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 4*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -43,5 +43,7 @@ define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -30,5 +30,9 @@
define_pd_global(uintx, JVMInvokeMethodSlack, 12288);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 4*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
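All three globals hunks add the same pair of platform defaults. HeapBaseMinAddress is a product flag, so the per-platform default can still be overridden at launch, e.g. java -XX:HeapBaseMinAddress=4g MyApp (the value and class name here are illustrative, not taken from this changeset).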
/*
* Copyright 2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2006-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -25,58 +25,107 @@
# include "incls/_precompiled.incl"
# include "incls/_vm_version_solaris_sparc.cpp.incl"
# include <sys/auxv.h>
# include <sys/auxv_SPARC.h>
# include <sys/systeminfo.h>
int VM_Version::platform_features(int features) {
// We determine what sort of hardware we have via sysinfo(SI_ISALIST, ...).
// This isn't the best of all possible ways because there's not enough
// detail in the isa list it returns, but it's a bit less arcane than
// generating assembly code and an illegal instruction handler. We used
// to generate a getpsr trap, but that's even more arcane.
//
// Another possibility would be to use sysinfo(SI_PLATFORM, ...), but
// that would require more knowledge here than is wise.
// We need to keep these here as long as we have to build on Solaris
// versions before 10.
#ifndef SI_ARCHITECTURE_32
#define SI_ARCHITECTURE_32 516 /* basic 32-bit SI_ARCHITECTURE */
#endif
// isalist spec via 'man isalist' as of 01-Aug-2001
#ifndef SI_ARCHITECTURE_64
#define SI_ARCHITECTURE_64 517 /* basic 64-bit SI_ARCHITECTURE */
#endif
static void do_sysinfo(int si, const char* string, int* features, int mask) {
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*)malloc(bufsize);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') features |= hardware_int_muldiv_m;
else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
size_t bufsize = sysinfo(si, &tmp, 1);
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
// All SI defines used below must be supported.
guarantee(bufsize != -1, "must be supported");
char* buf = (char*) malloc(bufsize);
if (buf == NULL)
return;
if (sysinfo(si, buf, bufsize) == bufsize) {
// Compare the string.
if (strcmp(buf, string) == 0) {
*features |= mask;
}
free(buf);
}
bufsize = sysinfo(SI_MACHINE, &tmp, 1);
buf = (char*)malloc(bufsize);
free(buf);
}
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) supported.");
#endif
if (buf != NULL) {
if (sysinfo(SI_MACHINE, buf, bufsize) == bufsize) {
if (strstr(buf, "sun4v") != NULL) {
features |= sun4v_m;
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Extract valid instruction set extensions.
uint_t av;
uint_t avn = os::Solaris::getisax(&av, 1);
assert(avn == 1, "should only return one av");
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) malloc(bufsize);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
free(buf);
}
free(buf);
}
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
return features;
}
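For reference, a minimal Solaris-only sketch of the getisax(2) call that the rewritten platform_features prefers over isalist string parsing (Solaris 10 and later; AV_SPARC_VIS comes from <sys/auxv_SPARC.h> on SPARC builds):

#include <sys/auxv.h>
#include <sys/auxv_SPARC.h>
#include <stdio.h>

int main(void) {
  uint32_t av;
  uint_t n = getisax(&av, 1);   /* fetch the first word of AV_SPARC_* bits */
  if (n > 0 && (av & AV_SPARC_VIS) != 0)
    printf("VIS 1 instructions available\n");
  return 0;
}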
......@@ -46,5 +46,7 @@ define_pd_global(uintx, JVMInvokeMethodSlack, 10*K);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 256*M);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -45,5 +45,7 @@ define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -68,6 +68,9 @@ typedef struct _DISPATCHER_CONTEXT {
PVOID HandlerData;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
#if MSC_VER < 1500
/* Not needed for VS2008 compiler, comes from winnt.h. */
typedef EXCEPTION_DISPOSITION (*PEXCEPTION_ROUTINE) (
IN PEXCEPTION_RECORD ExceptionRecord,
IN ULONG64 EstablisherFrame,
......@@ -75,4 +78,6 @@ typedef EXCEPTION_DISPOSITION (*PEXCEPTION_ROUTINE) (
IN OUT PDISPATCHER_CONTEXT DispatcherContext
);
#endif
#endif // AMD64
......@@ -27,6 +27,8 @@ import java.util.*;
public class WinGammaPlatformVC7 extends WinGammaPlatform {
String projectVersion() {return "7.10";};
public void writeProjectFile(String projectFileName, String projectName,
Vector allConfigs) throws IOException {
System.out.println();
......@@ -40,7 +42,7 @@ public class WinGammaPlatformVC7 extends WinGammaPlatform {
"VisualStudioProject",
new String[] {
"ProjectType", "Visual C++",
"Version", "7.10",
"Version", projectVersion(),
"Name", projectName,
"ProjectGUID", "{8822CB5C-1C41-41C2-8493-9F6E1994338B}",
"SccProjectName", "",
......@@ -417,7 +419,9 @@ public class WinGammaPlatformVC7 extends WinGammaPlatform {
new String[] {
"Name", "VCPreLinkEventTool",
"Description", BuildConfig.getFieldString(null, "PrelinkDescription"),
"CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace('\t', '\n'))
// Caution: String.replace(CharSequence, CharSequence) is only available from JDK 5 onwards
"CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace
("\t", "&#x0D;&#x0A;"))
}
);
......@@ -542,25 +546,41 @@ public class WinGammaPlatformVC7 extends WinGammaPlatform {
}
class CompilerInterfaceVC7 extends CompilerInterface {
Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
Vector rv = new Vector();
void getBaseCompilerFlags_common(Vector defines, Vector includes, String outDir, Vector rv) {
// advanced M$ IDE (2003) can only recognize name if it's first or
// second attribute in the tag - go guess
addAttr(rv, "Name", "VCCLCompilerTool");
addAttr(rv, "AdditionalIncludeDirectories", Util.join(",", includes));
addAttr(rv, "PreprocessorDefinitions", Util.join(";", defines).replace("\"","&quot;"));
addAttr(rv, "UsePrecompiledHeader", "3");
addAttr(rv, "PrecompiledHeaderThrough", "incls"+Util.sep+"_precompiled.incl");
addAttr(rv, "PreprocessorDefinitions",
Util.join(";", defines).replace("\"","&quot;"));
addAttr(rv, "PrecompiledHeaderThrough",
"incls"+Util.sep+"_precompiled.incl");
addAttr(rv, "PrecompiledHeaderFile", outDir+Util.sep+"vm.pch");
addAttr(rv, "AssemblerListingLocation", outDir);
addAttr(rv, "ObjectFile", outDir+Util.sep);
addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"vm.pdb");
// Set /nologo option
addAttr(rv, "SuppressStartupBanner", "TRUE");
// Suppress the default /Tc or /Tp. 0 is compileAsDefault
addAttr(rv, "CompileAs", "0");
// Set /W3 option. 3 is warningLevel_3
addAttr(rv, "WarningLevel", "3");
// Set /WX option.
addAttr(rv, "WarnAsError", "TRUE");
// Set /GS option
addAttr(rv, "BufferSecurityCheck", "FALSE");
// Set /Zi option. 3 is debugEnabled
addAttr(rv, "DebugInformationFormat", "3");
}
Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
Vector rv = new Vector();
getBaseCompilerFlags_common(defines, includes, outDir, rv);
// Set /Yu option. 3 is pchUseUsingSpecific
// Note: Starting VC8 pchUseUsingSpecific is 2 !!!
addAttr(rv, "UsePrecompiledHeader", "3");
// Set /EHsc- option
addAttr(rv, "ExceptionHandling", "FALSE");
return rv;
......@@ -579,27 +599,39 @@ class CompilerInterfaceVC7 extends CompilerInterface {
"/export:jio_vsnprintf ");
addAttr(rv, "AdditionalDependencies", "Wsock32.lib winmm.lib");
addAttr(rv, "OutputFile", outDll);
// Set /INCREMENTAL option. 1 is linkIncrementalNo
addAttr(rv, "LinkIncremental", "1");
addAttr(rv, "SuppressStartupBanner", "TRUE");
addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"vm.pdb");
// Set /SUBSYSTEM option. 2 is subSystemWindows
addAttr(rv, "SubSystem", "2");
addAttr(rv, "BaseAddress", "0x8000000");
addAttr(rv, "ImportLibrary", outDir+Util.sep+"jvm.lib");
// Set /MACHINE option. 1 is machineX86
addAttr(rv, "TargetMachine", "1");
return rv;
}
Vector getDebugCompilerFlags(String opt) {
Vector rv = new Vector();
void getDebugCompilerFlags_common(String opt, Vector rv) {
// Set /On option
addAttr(rv, "Optimization", opt);
addAttr(rv, "OptimizeForProcessor", "1");
addAttr(rv, "DebugInformationFormat", "3");
addAttr(rv, "RuntimeLibrary", "2");
// Set /FR option. 1 is brAllInfo
addAttr(rv, "BrowseInformation", "1");
addAttr(rv, "BrowseInformationFile", "$(IntDir)" + Util.sep);
// Set /MD option. 2 is rtMultiThreadedDLL
addAttr(rv, "RuntimeLibrary", "2");
// Set /Oy- option
addAttr(rv, "OmitFramePointers", "FALSE");
}
Vector getDebugCompilerFlags(String opt) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt, rv);
return rv;
}
......@@ -607,18 +639,29 @@ class CompilerInterfaceVC7 extends CompilerInterface {
Vector getDebugLinkerFlags() {
Vector rv = new Vector();
addAttr(rv, "GenerateDebugInformation", "TRUE");
addAttr(rv, "GenerateDebugInformation", "TRUE"); // == /DEBUG option
return rv;
}
void getProductCompilerFlags_common(Vector rv) {
// Set /O2 option. 2 is optimizeMaxSpeed
addAttr(rv, "Optimization", "2");
// Set /Oy- option
addAttr(rv, "OmitFramePointers", "FALSE");
}
Vector getProductCompilerFlags() {
Vector rv = new Vector();
addAttr(rv, "Optimization", "2");
getProductCompilerFlags_common(rv);
// Set /Ob option. 1 is expandOnlyInline
addAttr(rv, "InlineFunctionExpansion", "1");
// Set /GF option.
addAttr(rv, "StringPooling", "TRUE");
// Set /MD option. 2 is rtMultiThreadedDLL
addAttr(rv, "RuntimeLibrary", "2");
// Set /Gy option
addAttr(rv, "EnableFunctionLevelLinking", "TRUE");
return rv;
......@@ -627,7 +670,9 @@ class CompilerInterfaceVC7 extends CompilerInterface {
Vector getProductLinkerFlags() {
Vector rv = new Vector();
// Set /OPT:REF option. 2 is optReferences
addAttr(rv, "OptimizeReferences", "2");
// Set /OPT:ICF option. 2 is optFolding
addAttr(rv, "EnableCOMDATFolding", "2");
return rv;
......
/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
import java.io.*;
import java.util.*;
public class WinGammaPlatformVC8 extends WinGammaPlatformVC7 {
String projectVersion() {return "8.00";};
}
class CompilerInterfaceVC8 extends CompilerInterfaceVC7 {
Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
Vector rv = new Vector();
getBaseCompilerFlags_common(defines, includes, outDir, rv);
// Set /Yu option. 2 is pchUseUsingSpecific
addAttr(rv, "UsePrecompiledHeader", "2");
// Set /EHsc- option. 0 is cppExceptionHandlingNo
addAttr(rv, "ExceptionHandling", "0");
return rv;
}
Vector getDebugCompilerFlags(String opt) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt, rv);
return rv;
}
Vector getProductCompilerFlags() {
Vector rv = new Vector();
getProductCompilerFlags_common(rv);
return rv;
}
}
/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
import java.io.*;
import java.util.*;
public class WinGammaPlatformVC9 extends WinGammaPlatformVC8 {
String projectVersion() {return "9.00";};
}
class CompilerInterfaceVC9 extends CompilerInterfaceVC8 {
}
......@@ -44,7 +44,7 @@ using namespace std;
#error "Something is wrong with the detection of MSC_VER in the makefiles"
#endif
#if _MSC_VER >= 1400 && !defined(_WIN64)
#if _MSC_VER >= 1400
#define strdup _strdup
#endif
......
......@@ -321,16 +321,19 @@ void AbstractAssembler::block_comment(const char* comment) {
bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
// Exception handler checks the nmethod's implicit null checks table
// only when this method returns false.
if (UseCompressedOops) {
#ifdef _LP64
if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
assert (Universe::heap() != NULL, "java heap should be initialized");
// The first page after heap_base is unmapped and
// the 'offset' is equal to [heap_base + offset] for
// narrow oop implicit null checks.
uintptr_t heap_base = (uintptr_t)Universe::heap_base();
if ((uintptr_t)offset >= heap_base) {
uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
if ((uintptr_t)offset >= base) {
// Normalize offset for the next check.
offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
}
}
#endif
return offset < 0 || os::vm_page_size() <= offset;
}
......
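The normalization above works because a null narrow oop decodes to the narrow oop base itself, so loading a field of null faults at base + field_offset rather than at field_offset. A small sketch with made-up numbers (base, offset, and page size are assumptions, not values from a real VM):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t narrow_oop_base = 0x100000000ULL; // hypothetical heap base
  const uint64_t page_size = 4096;
  uint64_t fault = narrow_oop_base + 0x18;  // null narrow oop, field at +0x18
  // Mirror the normalization in needs_explicit_null_check().
  uint64_t offset = (fault >= narrow_oop_base) ? fault - narrow_oop_base
                                               : fault;
  // Offsets inside the unmapped first page are caught by the implicit
  // null check (SIGSEGV); anything larger needs an explicit test.
  std::printf("explicit check needed: %s\n",
              offset < page_size ? "no" : "yes");
  return 0;
}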
......@@ -284,6 +284,11 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
//
int ex_start = handler->start();
int ex_end = handler->limit();
// ensure a block at the start of exception range and start of following code
(void) make_block_at(ex_start);
if (ex_end < _code_size)
(void) make_block_at(ex_end);
if (eb->is_handler()) {
// Extend old handler exception range to cover additional range.
int old_ex_start = eb->ex_start_bci();
......@@ -295,10 +300,6 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
eb->clear_exception_handler(); // Reset exception information
}
eb->set_exception_range(ex_start, ex_end);
// ensure a block at the start of exception range and start of following code
(void) make_block_at(ex_start);
if (ex_end < _code_size)
(void) make_block_at(ex_end);
}
}
......
......@@ -2747,9 +2747,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
super_klass(),
methods(),
access_flags,
class_loader(),
class_name(),
local_interfaces());
class_loader,
class_name,
local_interfaces(),
CHECK_(nullHandle));
// Size of Java itable (in words)
itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);
......@@ -3229,7 +3230,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
// print out the superclass.
const char * from = Klass::cast(this_klass())->external_name();
if (this_klass->java_super() != NULL) {
tty->print("RESOLVE %s %s\n", from, instanceKlass::cast(this_klass->java_super())->external_name());
tty->print("RESOLVE %s %s (super)\n", from, instanceKlass::cast(this_klass->java_super())->external_name());
}
// print out each of the interface classes referred to by this class.
objArrayHandle local_interfaces(THREAD, this_klass->local_interfaces());
......@@ -3239,7 +3240,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
klassOop k = klassOop(local_interfaces->obj_at(i));
instanceKlass* to_class = instanceKlass::cast(k);
const char * to = to_class->external_name();
tty->print("RESOLVE %s %s\n", from, to);
tty->print("RESOLVE %s %s (interface)\n", from, to);
}
}
}
......
......@@ -284,6 +284,7 @@
template(value_name, "value") \
template(frontCacheEnabled_name, "frontCacheEnabled") \
template(stringCacheEnabled_name, "stringCacheEnabled") \
template(bitCount_name, "bitCount") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \
......@@ -304,6 +305,7 @@
template(double_long_signature, "(D)J") \
template(double_double_signature, "(D)D") \
template(int_float_signature, "(I)F") \
template(long_int_signature, "(J)I") \
template(long_long_signature, "(J)J") \
template(long_double_signature, "(J)D") \
template(byte_signature, "B") \
......@@ -376,7 +378,7 @@
template(unknown_class_name, "<Unknown>") \
\
/* used to identify class loaders handling parallel class loading */ \
template(parallelCapable_name, "parallelLockMap;") \
template(parallelCapable_name, "parallelLockMap") \
\
/* JVM monitoring and management support */ \
template(java_lang_StackTraceElement_array, "[Ljava/lang/StackTraceElement;") \
......@@ -507,6 +509,10 @@
do_name( doubleToLongBits_name, "doubleToLongBits") \
do_intrinsic(_longBitsToDouble, java_lang_Double, longBitsToDouble_name, long_double_signature, F_S) \
do_name( longBitsToDouble_name, "longBitsToDouble") \
\
do_intrinsic(_bitCount_i, java_lang_Integer, bitCount_name, int_int_signature, F_S) \
do_intrinsic(_bitCount_l, java_lang_Long, bitCount_name, long_int_signature, F_S) \
\
do_intrinsic(_reverseBytes_i, java_lang_Integer, reverseBytes_name, int_int_signature, F_S) \
do_name( reverseBytes_name, "reverseBytes") \
do_intrinsic(_reverseBytes_l, java_lang_Long, reverseBytes_name, long_long_signature, F_S) \
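The two _bitCount entries register Integer.bitCount and Long.bitCount as JIT intrinsics, so they can compile down to a hardware population count where one exists. For reference, a plain sketch of the value the intrinsic computes (Kernighan's bit-clearing loop; the helper name is illustrative):

#include <cstdint>
#include <cstdio>

static int bit_count(uint32_t x) {
  int n = 0;
  while (x != 0) {
    x &= x - 1;   // clears the lowest set bit each iteration
    ++n;
  }
  return n;
}

int main() {
  std::printf("%d\n", bit_count(0xF0F0u));   // prints 8
  return 0;
}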
......@@ -696,7 +702,6 @@
do_signature(putShort_raw_signature, "(JS)V") \
do_signature(getChar_raw_signature, "(J)C") \
do_signature(putChar_raw_signature, "(JC)V") \
do_signature(getInt_raw_signature, "(J)I") \
do_signature(putInt_raw_signature, "(JI)V") \
do_alias(getLong_raw_signature, /*(J)J*/ long_long_signature) \
do_alias(putLong_raw_signature, /*(JJ)V*/ long_long_void_signature) \
......@@ -713,7 +718,7 @@
do_intrinsic(_getByte_raw, sun_misc_Unsafe, getByte_name, getByte_raw_signature, F_RN) \
do_intrinsic(_getShort_raw, sun_misc_Unsafe, getShort_name, getShort_raw_signature, F_RN) \
do_intrinsic(_getChar_raw, sun_misc_Unsafe, getChar_name, getChar_raw_signature, F_RN) \
do_intrinsic(_getInt_raw, sun_misc_Unsafe, getInt_name, getInt_raw_signature, F_RN) \
do_intrinsic(_getInt_raw, sun_misc_Unsafe, getInt_name, long_int_signature, F_RN) \
do_intrinsic(_getLong_raw, sun_misc_Unsafe, getLong_name, getLong_raw_signature, F_RN) \
do_intrinsic(_getFloat_raw, sun_misc_Unsafe, getFloat_name, getFloat_raw_signature, F_RN) \
do_intrinsic(_getDouble_raw, sun_misc_Unsafe, getDouble_name, getDouble_raw_signature, F_RN) \
......
......@@ -145,14 +145,9 @@ void ConcurrentG1Refine::set_pya_restart() {
if (G1RSBarrierUseQueue) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
dcqs.abandon_logs();
if (_cg1rThread->do_traversal()) {
_pya = PYA_restart;
} else {
_cg1rThread->set_do_traversal(true);
// Reset the post-yield actions.
_pya = PYA_continue;
_last_pya = PYA_continue;
}
// Reset the post-yield actions.
_pya = PYA_continue;
_last_pya = PYA_continue;
} else {
_pya = PYA_restart;
}
......
......@@ -107,7 +107,7 @@ void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap,
#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace rs) const {
// assert(_bm.map() == _virtual_space.low(), "map inconsistency");
assert(((size_t)_bm.size() * (1 << _shifter)) == _bmWordSize,
assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
"size inconsistency");
return _bmStartWord == (HeapWord*)(rs.base()) &&
_bmWordSize == rs.size()>>LogHeapWordSize;
......@@ -1232,7 +1232,16 @@ public:
if (!_final && _regions_done == 0)
_start_vtime_sec = os::elapsedVTime();
if (hr->continuesHumongous()) return false;
if (hr->continuesHumongous()) {
HeapRegion* hum_start = hr->humongous_start_region();
// If the head region of the humongous object has been determined
// to be alive, then all the tail regions should be marked as well.
if (_region_bm->at(hum_start->hrs_index())) {
_region_bm->par_at_put(hr->hrs_index(), 1);
}
return false;
}
HeapWord* nextTop = hr->next_top_at_mark_start();
HeapWord* start = hr->top_at_conc_mark_count();
......
......@@ -295,6 +295,14 @@
\
product(uintx, G1FixedSurvivorSpaceSize, 0, \
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size")
"otherwise SurvivorRatio is used to determine the size") \
\
experimental(bool, G1EnableParallelRSetUpdating, false, \
"Enables the parallelization of remembered set updating " \
"during evacuation pauses") \
\
experimental(bool, G1EnableParallelRSetScanning, false, \
"Enables the parallelization of remembered set scanning " \
"during evacuation pauses")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
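Flags declared with experimental() are locked by default, so trying the two new switches would look something like java -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:+G1EnableParallelRSetUpdating -XX:+G1EnableParallelRSetScanning MyApp (the class name is illustrative).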
......@@ -508,7 +508,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
typedef PosParPRT* PosParPRTPtr;
if (_max_fine_entries == 0) {
assert(_mod_max_fine_entries_mask == 0, "Both or none.");
_max_fine_entries = (1 << G1LogRSRegionEntries);
_max_fine_entries = (size_t)(1 << G1LogRSRegionEntries);
_mod_max_fine_entries_mask = _max_fine_entries - 1;
#if SAMPLE_FOR_EVICTION
assert(_fine_eviction_sample_size == 0
......
......@@ -4598,6 +4598,7 @@ vm_version_<arch>.cpp vm_version_<arch>.hpp
vm_version_<arch>.hpp globals_extension.hpp
vm_version_<arch>.hpp vm_version.hpp
vm_version_<os_arch>.cpp os.hpp
vm_version_<os_arch>.cpp vm_version_<arch>.hpp
vmreg.cpp assembler.hpp
......
[26 additional file diffs collapsed in the original view.]