Commit 7b5c803a authored by kvn

Merge

...
@@ -48,6 +48,8 @@ if [ "$OS" = "Linux" ]; then
     CPU=i386
   fi
 else
+  LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
+  export LD_AUDIT_32
   SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p`
   OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
   CPU=sparc
...
...
@@ -43,6 +43,8 @@ else
   fi
 fi

+LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
+export LD_AUDIT_64
 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU
 OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
...
...
@@ -56,24 +56,28 @@ i386:: javahomecheck
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 amd64:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=amd64 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=amd64 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 sparc:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=v8 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=v8 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 sparcv9:: javahomecheck
 	$(MKDIRS) $@
 	@javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
 	CC -G -KPIC -xarch=v9 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \
 	   -M mapfile -o $@/libsaproc.so -ldemangle
+	CC -xarch=v9 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc

 clean::
 	$(RM) -rf sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h
...
...
@@ -45,6 +45,8 @@ SUNWprivate_1.1 {
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_resume0;
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_suspend0;
 		Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_writeBytesToProcess0;
+		# this is needed by saproc_audit.cpp to redirect opens in libproc.so
+		libsaproc_open;
 	local:
 		*;
 };
...
@@ -214,50 +214,59 @@ static void init_alt_root() {
     }
   }
 }

-static int find_file_hook(const char * name, int elf_checksum) {
-  init_alt_root();
-
-  if (_libsaproc_debug) {
-    printf("libsaproc DEBUG: find_file_hook %s 0x%x\n", name, elf_checksum);
-  }
-
-  if (alt_root_len > 0) {
-    int fd = -1;
-    char alt_path[PATH_MAX+1];
-
-    strcpy(alt_path, alt_root);
-    strcat(alt_path, name);
-    fd = open(alt_path, O_RDONLY);
-    if (fd >= 0) {
-      if (_libsaproc_debug) {
-        printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path);
-      }
-      return fd;
-    }
-
-    if (strrchr(name, '/')) {
-      strcpy(alt_path, alt_root);
-      strcat(alt_path, strrchr(name, '/'));
-      fd = open(alt_path, O_RDONLY);
-      if (fd >= 0) {
-        if (_libsaproc_debug) {
-          printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path);
-        }
-        return fd;
-      }
-    }
-  }
-  return -1;
-}
-
-static int pathmap_open(const char* name) {
-  int fd = open(name, O_RDONLY);
-  if (fd < 0) {
-    fd = find_file_hook(name, 0);
-  }
-  return fd;
-}
+// This function is a complete substitute for the open system call
+// since it's also used to override open calls from libproc to
+// implement a pathmap-style facility for the SA. If libproc
+// starts using other interfaces then this might have to be extended
+// to cover other calls.
+extern "C" int libsaproc_open(const char * name, int oflag, ...) {
+  if (oflag == O_RDONLY) {
+    init_alt_root();
+
+    if (_libsaproc_debug) {
+      printf("libsaproc DEBUG: libsaproc_open %s\n", name);
+    }
+
+    if (alt_root_len > 0) {
+      int fd = -1;
+      char alt_path[PATH_MAX+1];
+
+      strcpy(alt_path, alt_root);
+      strcat(alt_path, name);
+      fd = open(alt_path, O_RDONLY);
+      if (fd >= 0) {
+        if (_libsaproc_debug) {
+          printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path);
+        }
+        return fd;
+      }
+
+      if (strrchr(name, '/')) {
+        strcpy(alt_path, alt_root);
+        strcat(alt_path, strrchr(name, '/'));
+        fd = open(alt_path, O_RDONLY);
+        if (fd >= 0) {
+          if (_libsaproc_debug) {
+            printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path);
+          }
+          return fd;
+        }
+      }
+    }
+  }
+
+  {
+    mode_t mode;
+    va_list ap;
+    va_start(ap, oflag);
+    mode = va_arg(ap, mode_t);
+    va_end(ap);
+
+    return open(name, oflag, mode);
+  }
+}

 static void * pathmap_dlopen(const char * name, int mode) {
   init_alt_root();
...
@@ -608,7 +617,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
   print_debug("looking for %s\n", classes_jsa);
   // open the classes[_g].jsa
-  int fd = pathmap_open(classes_jsa);
+  int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "can't open shared archive file %s", classes_jsa);
...
@@ -1209,8 +1218,6 @@ JNIEXPORT jstring JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_d
   return res;
 }

-typedef int (*find_file_hook_t)(const char *, int elf_checksum);
-
 /*
  * Class:     sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:    initIDs
@@ -1230,16 +1237,6 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_init
   if (libproc_handle == 0)
     THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");

-  // If possible, set shared object find file hook.
-  void (*set_hook)(find_file_hook_t) = (void(*)(find_file_hook_t))dlsym(libproc_handle, "Pset_find_file_hook");
-  if (set_hook) {
-    // we found find file hook symbol, set up our hook function.
-    set_hook(find_file_hook);
-  } else if (getenv(SA_ALTROOT)) {
-    printf("libsaproc WARNING: %s set, but can't set file hook. " \
-           "Did you use right version of libproc.so?\n", SA_ALTROOT);
-  }
-
   p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
   CHECK_EXCEPTION;
...
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <varargs.h>
// This library sets up an interposer on open calls from libproc.so to
// support a pathmap facility in the SA.
static uintptr_t* libproc_cookie;
static uintptr_t* libc_cookie;
static uintptr_t* libsaproc_cookie;
uint_t
la_version(uint_t version)
{
return (LAV_CURRENT);
}
uint_t
la_objopen(Link_map * lmp, Lmid_t lmid, uintptr_t * cookie)
{
if (strstr(lmp->l_name, "/libproc.so") != NULL) {
libproc_cookie = cookie;
return LA_FLG_BINDFROM;
}
if (strstr(lmp->l_name, "/libc.so") != NULL) {
libc_cookie = cookie;
return LA_FLG_BINDTO;
}
if (strstr(lmp->l_name, "/libsaproc.so") != NULL) {
libsaproc_cookie = cookie;
return LA_FLG_BINDTO | LA_FLG_BINDFROM;
}
return 0;
}
#if defined(_LP64)
uintptr_t
la_symbind64(Elf64_Sym *symp, uint_t symndx, uintptr_t *refcook,
uintptr_t *defcook, uint_t *sb_flags, const char *sym_name)
#else
uintptr_t
la_symbind32(Elf32_Sym *symp, uint_t symndx, uintptr_t *refcook,
uintptr_t *defcook, uint_t *sb_flags)
#endif
{
#if !defined(_LP64)
const char *sym_name = (const char *)symp->st_name;
#endif
if (strcmp(sym_name, "open") == 0 && refcook == libproc_cookie) {
// redirect all open calls from libproc.so through libsaproc_open which will
// try the alternate library locations first.
void* handle = dlmopen(LM_ID_BASE, "libsaproc.so", RTLD_NOLOAD);
if (handle == NULL) {
fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc.so during linking\n");
} else {
uintptr_t libsaproc_open = (uintptr_t)dlsym(handle, "libsaproc_open");
if (libsaproc_open == 0) {
fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc_open during linking\n");
} else {
return libsaproc_open;
}
}
}
return symp->st_value;
}
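
Editor's note (not part of the commit): the pieces above fit together as follows. The startup-script hunks export LD_AUDIT_32/LD_AUDIT_64, so the Solaris runtime linker loads libsaproc_audit.so as an rtld-audit library; la_objopen tags libproc.so, libc.so and libsaproc.so, and la_symbind redirects libproc's "open" PLT binding to the libsaproc_open that the mapfile exports. The net effect, sketched below in plain C++ (the helper name is hypothetical), is that every read-only open() issued inside libproc.so first probes the $SA_ALTROOT alternate locations:

#include <fcntl.h>

extern "C" int libsaproc_open(const char* name, int oflag, ...);

// What an open() inside libproc.so effectively becomes after auditing:
int open_from_libproc(const char* path) {
  // libsaproc_open tries $SA_ALTROOT-prefixed paths first, then falls
  // back to a plain open(2).
  return libsaproc_open(path, O_RDONLY);
}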
...
@@ -81,8 +81,4 @@ public class DebugInfoReadStream extends CompressedReadStream {
     Assert.that(false, "should not reach here");
     return null;
   }
-
-  public int readBCI() {
-    return readInt() + InvocationEntryBCI;
-  }
 }
...
@@ -82,6 +82,7 @@ public class PCDesc extends VMObject {
       tty.print(" ");
       sd.getMethod().printValueOn(tty);
       tty.print(" @" + sd.getBCI());
+      tty.print(" reexecute=" + sd.getReexecute());
       tty.println();
     }
   }
...
...
@@ -41,6 +41,7 @@ public class ScopeDesc {
   private NMethod code;
   private Method  method;
   private int     bci;
+  private boolean reexecute;

   /** Decoding offsets */
   private int decodeOffset;
   private int senderDecodeOffset;
@@ -61,7 +62,7 @@
     senderDecodeOffset = stream.readInt();
     method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
-    bci    = stream.readBCI();
+    setBCIAndReexecute(stream.readInt());
     // Decode offsets for body and sender
     localsDecodeOffset = stream.readInt();
     expressionsDecodeOffset = stream.readInt();
@@ -78,7 +79,7 @@
     senderDecodeOffset = stream.readInt();
     method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
-    bci    = stream.readBCI();
+    setBCIAndReexecute(stream.readInt());
     // Decode offsets for body and sender
     localsDecodeOffset = stream.readInt();
     expressionsDecodeOffset = stream.readInt();
@@ -88,6 +89,7 @@
   public NMethod getNMethod() { return code; }
   public Method getMethod() { return method; }
   public int getBCI() { return bci; }
+  public boolean getReexecute() { return reexecute; }

   /** Returns a List&lt;ScopeValue&gt; */
   public List getLocals() {
@@ -150,6 +152,7 @@
     tty.print("ScopeDesc for ");
     method.printValueOn(tty);
     tty.println(" @bci " + bci);
+    tty.println(" reexecute: " + reexecute);
   }

   // FIXME: add more accessors
@@ -157,6 +160,11 @@
   //--------------------------------------------------------------------------------
   // Internals only below this point
   //
+  private void setBCIAndReexecute(int combination) {
+    int InvocationEntryBci = VM.getVM().getInvocationEntryBCI();
+    bci = (combination >> 1) + InvocationEntryBci;
+    reexecute = (combination & 1) == 1 ? true : false;
+  }
+
   private DebugInfoReadStream streamAt(int decodeOffset) {
     return new DebugInfoReadStream(code, decodeOffset, objects);
...
...
@@ -8335,15 +8335,13 @@ void MacroAssembler::decode_heap_oop_not_null(Register r) {
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_base() == NULL) {
-    if (Universe::narrow_oop_shift() != 0) {
-      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
-      shlq(r, LogMinObjAlignmentInBytes);
-    }
-  } else {
-    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
-            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
-    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
+            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+    // Don't use Shift since it modifies flags.
+    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+  } else {
+    assert (Universe::narrow_oop_base() == NULL, "sanity");
   }
 }

@@ -8358,6 +8356,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
     leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
   } else if (dst != src) {
+    assert (Universe::narrow_oop_base() == NULL, "sanity");
     movq(dst, src);
   }
 }
...
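Editor's note (a sketch, not HotSpot code): the leaq above implements the compressed-oop decode address = heap_base + (narrow_oop << 3) in one instruction that, unlike shlq, leaves the flags untouched. The else branch is only reachable when the shift is 0 and the heap base is NULL, so the narrow oop already equals the full address. The standalone arithmetic:

#include <cstdint>

// Hypothetical standalone decode; the shift is 3 (Address::times_8)
// when objects are 8-byte aligned.
static inline uintptr_t decode_narrow_oop(uint32_t narrow, uintptr_t heap_base) {
  return heap_base + (static_cast<uintptr_t>(narrow) << 3);  // leaq base(,r,8)
}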
...
@@ -1643,7 +1643,8 @@ inline hrtime_t oldgetTimeNanos() {
 inline hrtime_t getTimeNanos() {
   if (VM_Version::supports_cx8()) {
     const hrtime_t now = gethrtime();
-    const hrtime_t prev = max_hrtime;
+    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
+    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
     if (now <= prev) return prev;   // same or retrograde time;
     const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
     assert(obsv >= prev, "invariant");   // Monotonicity
...
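Editor's note on why the plain load was unsafe (illustrative, not HotSpot code): on 32-bit x86 a 64-bit value occupies two registers, so "prev = max_hrtime" compiles to two 4-byte loads and can observe half of a concurrent cmpxchg update. A modern standalone equivalent of the fix:

#include <atomic>
#include <cstdint>

std::atomic<int64_t> max_time{0};  // stand-in for max_hrtime

int64_t read_monotonic() {
  // A single atomic 64-bit load can never return a half-written value,
  // which keeps the "now <= prev" monotonicity check sound.
  return max_time.load(std::memory_order_relaxed);
}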
...
@@ -46,6 +46,8 @@ inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest);
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }

+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #ifdef _GNU_SOURCE

 inline jint Atomic::add (jint add_value, volatile jint* dest) {
...
...
@@ -99,6 +99,8 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
   return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
 }

+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #else // !AMD64

 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
@@ -131,6 +133,15 @@ inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*
 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }

+extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst);
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_load_long(src, &dest);
+  return dest;
+}
+
 #endif // AMD64

 #ifdef _GNU_SOURCE
...
...
@@ -97,6 +97,15 @@
       popl     %ebx
       .end

+  // Support for void Atomic::load(volatile jlong* src, volatile jlong* dest).
+  .inline _Atomic_load_long,2
+      movl     0(%esp), %eax   // src
+      fildll   (%eax)
+      movl     4(%esp), %eax   // dest
+      fistpll  (%eax)
+      .end
+
   // Support for OrderAccess::acquire()
   .inline _OrderAccess_acquire,0
       movl     0(%esp), %eax
...
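Editor's note (hypothetical GCC-syntax equivalent, for readers unfamiliar with Sun Studio .il files): fildll/fistpll move all 8 bytes through the x87 stack in one load and one store, which x86 performs atomically for aligned addresses, so no lock prefix is needed:

typedef long long jlong;

static inline jlong atomic_load_long(const volatile jlong* src) {
  jlong dest;
  __asm__ volatile("fildll %1\n\t"    // push *src onto the x87 stack (one 64-bit read)
                   "fistpll %0"       // pop it into dest (one 64-bit write)
                   : "=m"(dest)
                   : "m"(*src));
  return dest;
}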
...
@@ -208,6 +208,15 @@ int IRScope::top_scope_bci() const {
   return scope->caller_bci();
 }

+bool IRScopeDebugInfo::should_reexecute() {
+  ciMethod* cur_method = scope()->method();
+  int       cur_bci    = bci();
+  if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) {
+    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
+    return Interpreter::bytecode_should_reexecute(code);
+  } else
+    return false;
+}

 // Implementation of CodeEmitInfo

@@ -253,7 +262,7 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/);
   recorder->end_safepoint(pc_offset);
 }
...
...
@@ -239,15 +239,20 @@ class IRScopeDebugInfo: public CompilationResourceObj {
   GrowableArray<MonitorValue*>* monitors()    { return _monitors; }
   IRScopeDebugInfo*             caller()      { return _caller;   }

-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
+  // Whether we should reexecute this bytecode for deopt
+  bool should_reexecute();
+
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) {
     if (caller() != NULL) {
       // Order is significant: Must record caller first.
-      caller()->record_debug_info(recorder, pc_offset);
+      caller()->record_debug_info(recorder, pc_offset, false/*topmost*/);
     }
     DebugToken* locvals = recorder->create_scope_values(locals());
     DebugToken* expvals = recorder->create_scope_values(expressions());
     DebugToken* monvals = recorder->create_monitor_values(monitors());
-    recorder->describe_scope(pc_offset, scope()->method(), bci(), locvals, expvals, monvals);
+    // reexecute allowed only for the topmost frame
+    bool reexecute = topmost ? should_reexecute() : false;
+    recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals);
   }
 };
...
...
@@ -379,7 +379,8 @@ void LIR_Assembler::record_non_safepoint_debug_info() {
     ValueStack* s = nth_oldest(vstack, n, s_bci);
     if (s == NULL) break;
     IRScope* scope = s->scope();
-    debug_info->describe_scope(pc_offset, scope->method(), s_bci);
+    // Always pass false for reexecute since these ScopeDescs are never used for deopt
+    debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
   }

   debug_info->end_non_safepoint(pc_offset);
...
...
@@ -1229,10 +1229,13 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
       // Compiled java method case.
       if (decode_offset != 0) {
+        bool dummy_reexecute = false;
         DebugInfoReadStream stream(nm, decode_offset);
         decode_offset = stream.read_int();
         method = (methodOop)nm->oop_at(stream.read_int());
-        bci = stream.read_bci();
+        // fill_in_stack_trace does not need the reexecute information, which
+        // exists only so that deoptimization can reexecute a bytecode
+        bci = stream.read_bci_and_reexecute(dummy_reexecute);
       } else {
         if (fr.is_first_frame()) break;
         address pc = fr.pc();
...
...
@@ -255,7 +255,8 @@ class DebugInfoReadStream : public CompressedReadStream {
   ScopeValue* read_object_value();
   ScopeValue* get_cached_object();
   // BCI encoding is mostly unsigned, but -1 is a distinguished value
-  int read_bci() { return read_int() + InvocationEntryBci; }
+  // Decoding mirrors the encoding: i = read_int(); bci = (i >> 1) + InvocationEntryBci; reexecute = (i & 1)
+  int read_bci_and_reexecute(bool& reexecute) { int i = read_int(); reexecute = (i & 1) ? true : false; return (i >> 1) + InvocationEntryBci; }
 };

 // DebugInfoWriteStream specializes CompressedWriteStream for
@@ -268,5 +269,6 @@ class DebugInfoWriteStream : public CompressedWriteStream {
  public:
   DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size);
   void write_handle(jobject h);
-  void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
+  // Encoding bci and reexecute into one word as ((bci - InvocationEntryBci) << 1) + reexecute
+  void write_bci_and_reexecute(int bci, bool reexecute) { write_int(((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0)); }
 };
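
Editor's note: a standalone round-trip of the packing above (assumes InvocationEntryBci == -1, its value in HotSpot's globalDefinitions.hpp), which also shows why the encoded word stays non-negative and therefore compresses well as an unsigned value:

#include <cassert>

const int InvocationEntryBci = -1;

int encode(int bci, bool reexecute) {
  return ((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0);
}

void decode(int word, int& bci, bool& reexecute) {
  reexecute = (word & 1) != 0;
  bci = (word >> 1) + InvocationEntryBci;
}

int main() {
  int bci; bool re;
  decode(encode(42, true), bci, re);
  assert(bci == 42 && re);                       // round-trips
  decode(encode(InvocationEntryBci, false), bci, re);
  assert(bci == -1 && !re);                      // the distinguished -1 encodes as 0
  return 0;
}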
...
@@ -280,6 +280,7 @@ int DebugInformationRecorder::find_sharable_decode_offset(int stream_offset) {
 void DebugInformationRecorder::describe_scope(int         pc_offset,
                                               ciMethod*   method,
                                               int         bci,
+                                              bool        reexecute,
                                               DebugToken* locals,
                                               DebugToken* expressions,
                                               DebugToken* monitors) {
@@ -297,7 +298,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
   // serialize scope
   jobject method_enc = (method == NULL)? NULL: method->encoding();
   stream()->write_int(oop_recorder()->find_index(method_enc));
-  stream()->write_bci(bci);
+  stream()->write_bci_and_reexecute(bci, reexecute);
   assert(method == NULL ||
          (method->is_native() && bci == 0) ||
          (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
...
...
@@ -87,6 +87,7 @@ class DebugInformationRecorder: public ResourceObj {
   void describe_scope(int         pc_offset,
                       ciMethod*   method,
                       int         bci,
+                      bool        reexecute,
                       DebugToken* locals      = NULL,
                       DebugToken* expressions = NULL,
                       DebugToken* monitors    = NULL);
...
...
@@ -46,6 +46,7 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
   _decode_offset = parent->_sender_decode_offset;
   _objects       = parent->_objects;
   decode_body();
+  assert(_reexecute == false, "reexecute not allowed");
 }

@@ -56,6 +57,7 @@ void ScopeDesc::decode_body() {
     _sender_decode_offset = DebugInformationRecorder::serialized_null;
     _method = methodHandle(_code->method());
     _bci = InvocationEntryBci;
+    _reexecute = false;
     _locals_decode_offset = DebugInformationRecorder::serialized_null;
     _expressions_decode_offset = DebugInformationRecorder::serialized_null;
     _monitors_decode_offset = DebugInformationRecorder::serialized_null;
@@ -65,7 +67,8 @@ void ScopeDesc::decode_body() {
     _sender_decode_offset = stream->read_int();
     _method = methodHandle((methodOop) stream->read_oop());
-    _bci    = stream->read_bci();
+    _bci    = stream->read_bci_and_reexecute(_reexecute);

     // decode offsets for body and sender
     _locals_decode_offset = stream->read_int();
     _expressions_decode_offset = stream->read_int();
@@ -170,6 +173,7 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
   st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin());
   st->print_cr(" offset:     %d", _decode_offset);
   st->print_cr(" bci:        %d", bci());
+  st->print_cr(" reexecute:  %s", should_reexecute() ? "true" : "false");
   st->print_cr(" locals:     %d", _locals_decode_offset);
   st->print_cr(" stack:      %d", _expressions_decode_offset);
   st->print_cr(" monitor:    %d", _monitors_decode_offset);
...
...
@@ -39,7 +39,8 @@ class SimpleScopeDesc : public StackObj {
     DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
     int ignore_sender = buffer.read_int();
     _method           = methodOop(buffer.read_oop());
-    _bci              = buffer.read_bci();
+    bool dummy_reexecute; // only methodOop and bci are needed!
+    _bci              = buffer.read_bci_and_reexecute(dummy_reexecute);
   }

   methodOop method() { return _method; }
@@ -60,8 +61,9 @@ class ScopeDesc : public ResourceObj {
   ScopeDesc(const nmethod* code, int decode_offset);

   // JVM state
   methodHandle method() const { return _method; }
   int          bci()    const { return _bci;    }
+  bool should_reexecute() const { return _reexecute; }

   GrowableArray<ScopeValue*>* locals();
   GrowableArray<ScopeValue*>* expressions();
@@ -86,6 +88,7 @@ class ScopeDesc : public ResourceObj {
   // JVM state
   methodHandle _method;
   int          _bci;
+  bool         _reexecute;

   // Decoding offsets
   int _decode_offset;
...
...
@@ -122,11 +122,15 @@ class AbstractInterpreter: AllStatic {
   static int size_top_interpreter_activation(methodOop method);

   // Deoptimization support
-  static address continuation_for(methodOop method,
-                                  address bcp,
-                                  int callee_parameters,
-                                  bool is_top_frame,
-                                  bool& use_next_mdp);
+  // Compute the entry address for continuation after
+  static address deopt_continue_after_entry(methodOop method,
+                                            address bcp,
+                                            int callee_parameters,
+                                            bool is_top_frame);
+  // Compute the entry address for reexecution
+  static address deopt_reexecute_entry(methodOop method, address bcp);
+  // Deoptimization should reexecute this bytecode
+  static bool    bytecode_should_reexecute(Bytecodes::Code code);

   // share implementation of size_activation and layout_activation:
   static int size_activation(methodOop method,
...
...
@@ -284,27 +284,75 @@ static BasicType constant_pool_type(methodOop method, int index) {
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support

-// If deoptimization happens, this method returns the point where to continue in
-// interpreter. For calls (invokexxxx, newxxxx) the continuation is at next
-// bci and the top of stack is in eax/edx/FPU tos.
-// For putfield/getfield, put/getstatic, the continuation is at the same
-// bci and the TOS is on stack.
-
-// Note: deopt_entry(type, 0) means reexecute bytecode
-//       deopt_entry(type, length) means continue at next bytecode
-
-address AbstractInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) {
+// If deoptimization happens, this function returns the point of next bytecode to continue execution
+address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
   assert(method->contains(bcp), "just checkin'");
   Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
+  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
   int             bci    = method->bci_from(bcp);
   int             length = -1; // initial value for debugging
   // compute continuation length
   length = Bytecodes::length_at(bcp);
   // compute result type
   BasicType type = T_ILLEGAL;
-  // when continuing after a compiler safepoint, re-execute the bytecode
-  // (an invoke is continued after the safepoint)
-  use_next_mdp = true;
+
+  switch (code) {
+    case Bytecodes::_invokevirtual  :
+    case Bytecodes::_invokespecial  :
+    case Bytecodes::_invokestatic   :
+    case Bytecodes::_invokeinterface: {
+      Thread *thread = Thread::current();
+      ResourceMark rm(thread);
+      methodHandle mh(thread, method);
+      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+      // since the cache entry might not be initialized:
+      // (NOT needed for the old calling convention)
+      if (!is_top_frame) {
+        int index = Bytes::get_native_u2(bcp+1);
+        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
+      }
+      break;
+    }
+
+    case Bytecodes::_ldc   :
+      type = constant_pool_type( method, *(bcp+1) );
+      break;
+
+    case Bytecodes::_ldc_w : // fall through
+    case Bytecodes::_ldc2_w:
+      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) );
+      break;
+
+    default:
+      type = Bytecodes::result_type(code);
+      break;
+  }
+
+  // return entry point for computed continuation state & bytecode length
+  return
+    is_top_frame
+    ? Interpreter::deopt_entry (as_TosState(type), length)
+    : Interpreter::return_entry(as_TosState(type), length);
+}
+
+// If deoptimization happens, this function returns the point where the interpreter reexecutes
+// the bytecode.
+// Note: Bytecodes::_athrow is a special case in that it does not return
+//       Interpreter::deopt_entry(vtos, 0) like others
+address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
+  assert(method->contains(bcp), "just checkin'");
+  Bytecodes::Code code = Bytecodes::java_code_at(bcp);
+#ifdef COMPILER1
+  if (code == Bytecodes::_athrow) {
+    return Interpreter::rethrow_exception_entry();
+  }
+#endif /* COMPILER1 */
+  return Interpreter::deopt_entry(vtos, 0);
+}
+
+// If deoptimization happens, the interpreter should reexecute these bytecodes.
+// This function mainly helps the compilers to set up the reexecute bit.
+bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
   switch (code) {
     case Bytecodes::_lookupswitch:
     case Bytecodes::_tableswitch:
@@ -340,56 +388,15 @@ address AbstractInterpreter::continuation_for(methodOop method, address bcp, int
     case Bytecodes::_getstatic :
     case Bytecodes::_putstatic :
    case Bytecodes::_aastore :
-      // reexecute the operation and TOS value is on stack
-      assert(is_top_frame, "must be top frame");
-      use_next_mdp = false;
-      return Interpreter::deopt_entry(vtos, 0);
-      break;
-
 #ifdef COMPILER1
+    // special case of reexecution
     case Bytecodes::_athrow :
-      assert(is_top_frame, "must be top frame");
-      use_next_mdp = false;
-      return Interpreter::rethrow_exception_entry();
-      break;
-#endif /* COMPILER1 */
-
-    case Bytecodes::_invokevirtual  :
-    case Bytecodes::_invokespecial  :
-    case Bytecodes::_invokestatic   :
-    case Bytecodes::_invokeinterface: {
-      Thread *thread = Thread::current();
-      ResourceMark rm(thread);
-      methodHandle mh(thread, method);
-      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
-      // since the cache entry might not be initialized:
-      // (NOT needed for the old calling convention)
-      if (!is_top_frame) {
-        int index = Bytes::get_native_u2(bcp+1);
-        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
-      }
-      break;
-    }
-
-    case Bytecodes::_ldc   :
-      type = constant_pool_type( method, *(bcp+1) );
-      break;
-
-    case Bytecodes::_ldc_w : // fall through
-    case Bytecodes::_ldc2_w:
-      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) );
-      break;
+#endif
+      return true;

     default:
-      type = Bytecodes::result_type(code);
-      break;
+      return false;
   }
-
-  // return entry point for computed continuation state & bytecode length
-  return
-    is_top_frame
-    ? Interpreter::deopt_entry (as_TosState(type), length)
-    : Interpreter::return_entry(as_TosState(type), length);
 }

 void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
...
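Editor's note (a hedged sketch of the intended call pattern, not code from this commit): with the split above, a deoptimizing frame would first consult the reexecute predicate and then pick one of the two entry computations:

// Hypothetical driver; pick_continuation is not a HotSpot function.
address pick_continuation(methodOop m, address bcp, int callee_params, bool top_frame) {
  Bytecodes::Code code = Bytecodes::java_code_at(bcp);
  if (Interpreter::bytecode_should_reexecute(code)) {
    return Interpreter::deopt_reexecute_entry(m, bcp);   // redo the bytecode
  }
  // otherwise resume at the bytecode after bcp
  return Interpreter::deopt_continue_after_entry(m, bcp, callee_params, top_frame);
}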
...
@@ -605,28 +605,41 @@ void TemplateInterpreter::ignore_safepoints() {
   }
 }

-// If deoptimization happens, this method returns the point where to continue in
-// interpreter. For calls (invokexxxx, newxxxx) the continuation is at next
-// bci and the top of stack is in eax/edx/FPU tos.
-// For putfield/getfield, put/getstatic, the continuation is at the same
-// bci and the TOS is on stack.
-
-// Note: deopt_entry(type, 0) means reexecute bytecode
-//       deopt_entry(type, length) means continue at next bytecode
-
-address TemplateInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) {
+//------------------------------------------------------------------------------------------------------------------------
+// Deoptimization support
+
+// If deoptimization happens, this function returns the point of next bytecode to continue execution
+address TemplateInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
+  return AbstractInterpreter::deopt_continue_after_entry(method, bcp, callee_parameters, is_top_frame);
+}
+
+// If deoptimization happens, this function returns the point where the interpreter reexecutes
+// the bytecode.
+// Note: Bytecodes::_athrow (C1 only) and Bytecodes::_return are the special cases
+//       that do not return "Interpreter::deopt_entry(vtos, 0)"
+address TemplateInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
   assert(method->contains(bcp), "just checkin'");
   Bytecodes::Code code = Bytecodes::java_code_at(bcp);
   if (code == Bytecodes::_return) {
     // This is used for deopt during registration of finalizers
     // during Object.<init>. We simply need to resume execution at
     // the standard return vtos bytecode to pop the frame normally.
     // reexecuting the real bytecode would cause double registration
     // of the finalizable object.
-    assert(is_top_frame, "must be on top");
     return _normal_table.entry(Bytecodes::_return).entry(vtos);
   } else {
-    return AbstractInterpreter::continuation_for(method, bcp, callee_parameters, is_top_frame, use_next_mdp);
+    return AbstractInterpreter::deopt_reexecute_entry(method, bcp);
+  }
+}
+
+// If deoptimization happens, the interpreter should reexecute this bytecode.
+// This function mainly helps the compilers to set up the reexecute bit.
+bool TemplateInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
+  if (code == Bytecodes::_return) {
+    // Yes, we consider Bytecodes::_return as a special case of reexecution
+    return true;
+  } else {
+    return AbstractInterpreter::bytecode_should_reexecute(code);
   }
 }
...
...
@@ -171,11 +171,15 @@ class TemplateInterpreter: public AbstractInterpreter {
   static void ignore_safepoints();   // ignores safepoints

   // Deoptimization support
-  static address continuation_for(methodOop method,
-                                  address bcp,
-                                  int callee_parameters,
-                                  bool is_top_frame,
-                                  bool& use_next_mdp);
+  // Compute the entry address for continuation after
+  static address deopt_continue_after_entry(methodOop method,
+                                            address bcp,
+                                            int callee_parameters,
+                                            bool is_top_frame);
+  // Deoptimization should reexecute this bytecode
+  static bool    bytecode_should_reexecute(Bytecodes::Code code);
+  // Compute the address for reexecution
+  static address deopt_reexecute_entry(methodOop method, address bcp);

 #include "incls/_templateInterpreter_pd.hpp.incl"
...
...
@@ -51,7 +51,7 @@ void CompactingPermGenGen::serialize_oops(SerializeOopClosure* soc) {
   soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(constantPoolOopDesc));
   soc->do_tag(sizeof(constantPoolCacheOopDesc));
-  soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE));
+  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
   soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(symbolOopDesc));
   soc->do_tag(sizeof(klassOopDesc));
...
...
@@ -38,6 +38,11 @@ class objArrayOopDesc : public arrayOopDesc {
   }

 public:
+  // Returns the offset of the first element.
+  static int base_offset_in_bytes() {
+    return arrayOopDesc::base_offset_in_bytes(T_OBJECT);
+  }
+
   // base is the address following the header.
   HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
...
...
@@ -910,7 +910,16 @@ void PhaseCFG::verify( ) const {
               !(b->head()->is_Loop() && n->is_Phi()) &&
               // See (+++) comment in reg_split.cpp
               !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
-            assert( b->find_node(def) < j, "uses must follow definitions" );
+            bool is_loop = false;
+            if (n->is_Phi()) {
+              for( uint l = 1; l < def->req(); l++ ) {
+                if (n == def->in(l)) {
+                  is_loop = true;
+                  break; // Some kind of loop
+                }
+              }
+            }
+            assert( is_loop || b->find_node(def) < j, "uses must follow definitions" );
           }
           if( def->is_SafePointScalarObject() ) {
             assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
...
...
@@ -37,6 +37,7 @@ InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* cal
     // Keep a private copy of the caller_jvms:
     _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());
     _caller_jvms->set_bci(caller_jvms->bci());
+    assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
   }
   assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
   assert((caller_tree == NULL ? 0 : caller_tree->inline_depth() + 1) == inline_depth(), "correct (redundant) depth parameter");
...
...
@@ -223,6 +223,7 @@ uint TailJumpNode::match_edge(uint idx) const {
 JVMState::JVMState(ciMethod* method, JVMState* caller) {
   assert(method != NULL, "must be valid call site");
   _method = method;
+  _reexecute = Reexecute_Undefined;
   debug_only(_bci = -99);  // random garbage value
   debug_only(_map = (SafePointNode*)-1);
   _caller = caller;
@@ -237,6 +238,7 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) {
 JVMState::JVMState(int stack_size) {
   _method = NULL;
   _bci = InvocationEntryBci;
+  _reexecute = Reexecute_Undefined;
   debug_only(_map = (SafePointNode*)-1);
   _caller = NULL;
   _depth = 1;
@@ -269,6 +271,7 @@ bool JVMState::same_calls_as(const JVMState* that) const {
     if (p->_method != q->_method)    return false;
     if (p->_method == NULL)          return true;  // bci is irrelevant
     if (p->_bci    != q->_bci)       return false;
+    if (p->_reexecute != q->_reexecute) return false;
     p = p->caller();
     q = q->caller();
     if (p == q)                      return true;
@@ -490,6 +493,7 @@ void JVMState::dump_spec(outputStream *st) const {
     if (!printed)
       _method->print_short_name(st);
     st->print(" @ bci:%d",_bci);
+    st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false");
   } else {
     st->print(" runtime stub");
   }
@@ -509,8 +513,8 @@ void JVMState::dump_on(outputStream* st) const {
     }
     _map->dump(2);
   }
-  st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=",
-            depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci());
+  st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
+            depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
   if (_method == NULL) {
     st->print_cr("(none)");
   } else {
@@ -537,6 +541,7 @@ void dump_jvms(JVMState* jvms) {
 JVMState* JVMState::clone_shallow(Compile* C) const {
   JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
   n->set_bci(_bci);
+  n->_reexecute = _reexecute;
   n->set_locoff(_locoff);
   n->set_stkoff(_stkoff);
   n->set_monoff(_monoff);
...
...
@@ -178,6 +178,13 @@ public:
 // This provides a way to map the optimized program back into the interpreter,
 // or to let the GC mark the stack.
 class JVMState : public ResourceObj {
+public:
+  typedef enum {
+    Reexecute_Undefined = -1, // not defined -- will be translated into false later
+    Reexecute_False     =  0, // false -- do not reexecute
+    Reexecute_True      =  1  // true -- reexecute the bytecode
+  } ReexecuteState; // Reexecute State
+
 private:
   JVMState*         _caller;    // List pointer for forming scope chains
   uint              _depth;     // One more than caller depth, or one.
@@ -188,10 +195,12 @@ private:
   uint              _endoff;    // Offset to end of input edge mapping
   uint              _sp;        // Java Expression Stack Pointer for this state
   int               _bci;       // Byte Code Index of this JVM point
+  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
   ciMethod*         _method;    // Method Pointer
   SafePointNode*    _map;       // Map node associated with this scope
 public:
   friend class Compile;
+  friend class PreserveReexecuteState;

   // Because JVMState objects live over the entire lifetime of the
   // Compile object, they are allocated into the comp_arena, which
@@ -222,16 +231,18 @@ public:
   bool              is_mon(uint i) const { return i >= _monoff && i < _scloff; }
   bool              is_scl(uint i) const { return i >= _scloff && i < _endoff; }

   uint              sp()     const { return _sp; }
   int               bci()    const { return _bci; }
+  bool              should_reexecute() const { return _reexecute==Reexecute_True; }
+  bool              is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
   bool              has_method() const { return _method != NULL; }
   ciMethod*         method() const { assert(has_method(), ""); return _method; }
   JVMState*         caller() const { return _caller; }
   SafePointNode*    map()    const { return _map; }
   uint              depth()  const { return _depth; }
   uint              debug_start() const; // returns locoff of root caller
   uint              debug_end()   const; // returns endoff of self
   uint              debug_size()  const {
     return loc_size() + sp() + mon_size() + scl_size();
   }
   uint              debug_depth() const; // returns sum of debug_size values at all depths
@@ -267,7 +278,9 @@ public:
   }
   void              set_map(SafePointNode *map) { _map = map; }
   void              set_sp(uint sp) { _sp = sp; }
-  void              set_bci(int bci) { _bci = bci; }
+  // _reexecute is initialized to "undefined" for a new bci
+  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
+  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

   // Miscellaneous utility functions
   JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
...
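Editor's note: a self-contained model (names illustrative, not HotSpot code) of the tri-state contract above; PreserveReexecuteState in graphKit.cpp below relies on exactly this save/restore behavior:

#include <cassert>

enum ReexecuteState { Reexecute_Undefined = -1, Reexecute_False = 0, Reexecute_True = 1 };

struct MiniJVMState {
  int bci = -1;
  ReexecuteState reexecute = Reexecute_Undefined;

  // Moving to a different bci invalidates any earlier reexecute decision.
  void set_bci(int b) { if (bci != b) reexecute = Reexecute_Undefined; bci = b; }
  void set_should_reexecute(bool r) { reexecute = r ? Reexecute_True : Reexecute_False; }
  bool should_reexecute() const { return reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return reexecute == Reexecute_Undefined; }
};

int main() {
  MiniJVMState s;
  s.set_bci(10);
  s.set_should_reexecute(true);
  assert(s.should_reexecute());
  s.set_bci(11);                        // a new bci resets the bit
  assert(s.is_reexecute_undefined());   // "undefined" later translates to false
  return 0;
}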
@@ -1792,15 +1792,12 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (UseCompressedOops && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
-    Node* in_decodeN = NULL;
     for (uint i=1; i<req(); ++i) {// For all paths in
       Node *ii = in(i);
       if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
-        // Note: in_decodeN is used only to define the type of new phi.
-        // Find a non dead path otherwise phi type will be wrong.
+        // Do optimization if a non dead path exist.
         if (ii->in(1)->bottom_type() != Type::TOP) {
           has_decodeN = true;
-          in_decodeN = ii->in(1);
         }
       } else if (!ii->is_Phi()) {
         may_push = false;
@@ -1809,7 +1806,9 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     if (has_decodeN && may_push) {
       PhaseIterGVN *igvn = phase->is_IterGVN();
-      PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
+      // Make narrow type for new phi.
+      const Type* narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
+      PhiNode* new_phi = new (phase->C, r->req()) PhiNode(r, narrow_t);
       uint orig_cnt = req();
       for (uint i=1; i<req(); ++i) {// For all paths in
         Node *ii = in(i);
@@ -1822,7 +1821,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
         if (ii->as_Phi() == this) {
           new_ii = new_phi;
         } else {
-          new_ii = new (phase->C, 2) EncodePNode(ii, in_decodeN->bottom_type());
+          new_ii = new (phase->C, 2) EncodePNode(ii, narrow_t);
           igvn->register_new_node_with_optimizer(new_ii);
         }
       }
...
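For context, the fix derives the narrow phi's type from the phi's own bottom_type() instead of from one arbitrary DecodeN input, which may sit on a dead (TOP) path and carry a stale type. A toy round-trip model of compressed oops (assumed base and shift values, not HotSpot code) shows the invariant the rewrite must preserve when it pushes the decode below the phi:

#include <cassert>
#include <cstdint>

// Toy compressed-oop codec: subtract a heap base, shift right by 3.
const uint64_t heap_base = 0x800000000ULL;   // e.g. -XX:HeapBaseMinAddress=32g
uint32_t encode(uint64_t oop)  { return (uint32_t)((oop - heap_base) >> 3); }
uint64_t decode(uint32_t noop) { return ((uint64_t)noop << 3) + heap_base; }

int main() {
  // Phi(DecodeN(a), DecodeN(b)) is rewritten to DecodeN(Phi(a, b));
  // the rewrite is only sound if encode/decode round-trips every input.
  uint64_t a = heap_base + 0x1230, b = heap_base + 0x4560;
  assert(decode(encode(a)) == a && decode(encode(b)) == b);
}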
@@ -620,6 +620,16 @@ BuildCutout::~BuildCutout() {
   assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
 }

+//---------------------------PreserveReexecuteState----------------------------
+PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
+  _kit = kit;
+  _sp = kit->sp();
+  _reexecute = kit->jvms()->_reexecute;
+}
+PreserveReexecuteState::~PreserveReexecuteState() {
+  _kit->jvms()->_reexecute = _reexecute;
+  _kit->set_sp(_sp);
+}

 //------------------------------clone_map--------------------------------------
 // Implementation of PreserveJVMState
@@ -738,6 +748,18 @@ bool GraphKit::dead_locals_are_killed() {
 #endif //ASSERT

+// Helper function for enforcing certain bytecodes to reexecute if
+// deoptimization happens
+static bool should_reexecute_implied_by_bytecode(JVMState *jvms) {
+  ciMethod* cur_method = jvms->method();
+  int       cur_bci    = jvms->bci();
+  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
+    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
+    return Interpreter::bytecode_should_reexecute(code);
+  } else
+    return false;
+}

 // Helper function for adding JVMState and debug information to node
 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
   // Add the safepoint edges to the call (or other safepoint).
@@ -781,6 +803,13 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
   JVMState* out_jvms = youngest_jvms->clone_deep(C);
   call->set_jvms(out_jvms); // Start jvms list for call node

+  // For a known set of bytecodes, the interpreter should reexecute them if
+  // deoptimization happens. We set the reexecute state for them here
+  if (out_jvms->is_reexecute_undefined() && //don't change if already specified
+      should_reexecute_implied_by_bytecode(out_jvms)) {
+    out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
+  }

   // Presize the call:
   debug_only(uint non_debug_edges = call->req());
   call->add_req_batch(top(), youngest_jvms->debug_depth());
...
@@ -763,3 +763,16 @@ class BuildCutout: public PreserveJVMState {
   BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
   ~BuildCutout();
 };

+// Helper class to preserve the original _reexecute bit and _sp and restore
+// them back
+class PreserveReexecuteState: public StackObj {
+ protected:
+  GraphKit*                _kit;
+  uint                     _sp;
+  JVMState::ReexecuteState _reexecute;
+ public:
+  PreserveReexecuteState(GraphKit* kit);
+  ~PreserveReexecuteState();
+};
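PreserveReexecuteState follows HotSpot's usual StackObj save/restore idiom. A self-contained sketch of the same RAII shape (simplified types, not the real classes) shows how a caller temporarily raises _sp and forces the reexecute bit for the duration of a block:

#include <cassert>

struct MiniKit { unsigned sp = 0; int reexecute = -1; };  // -1 ~ Reexecute_Undefined

class PreserveState {                 // shape of PreserveReexecuteState
  MiniKit* _kit;
  unsigned _sp;
  int      _reexecute;
 public:
  PreserveState(MiniKit* k) : _kit(k), _sp(k->sp), _reexecute(k->reexecute) {}
  ~PreserveState() { _kit->reexecute = _reexecute; _kit->sp = _sp; }
};

int main() {
  MiniKit kit;
  kit.sp = 3;
  {
    PreserveState preexecs(&kit);
    kit.sp += 1;        // push the arguments back for a potential deopt
    kit.reexecute = 1;  // force reexecution of the invoking bytecode
  }                     // destructor restores the original sp and bit
  assert(kit.sp == 3 && kit.reexecute == -1);
}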
@@ -2064,7 +2064,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
     // See if it is a narrow oop array.
     if (adr_type->isa_aryptr()) {
-      if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) {
+      if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
         const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
         if (elem_type != NULL) {
           sharpened_klass = elem_type->klass();
@@ -3169,78 +3169,85 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
   Node* end               = is_copyOfRange? argument(2): argument(1);
   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);

-  _sp += nargs;  // set original stack for use by uncommon_trap
-  array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
-  original          = do_null_check(original, T_OBJECT);
-  _sp -= nargs;
-
-  // Check if a null path was taken unconditionally.
-  if (stopped())  return true;
-
-  Node* orig_length = load_array_length(original);
-
-  Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nargs,
-                                            NULL, 0);
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  klass_node = do_null_check(klass_node, T_OBJECT);
-  _sp -= nargs;
-
-  RegionNode* bailout = new (C, 1) RegionNode(1);
-  record_for_igvn(bailout);
+  Node* newcopy;
+
+  //set the original stack and the reexecute bit for the interpreter to reexecute
+  //the bytecode that invokes Arrays.copyOf if deoptimization happens
+  { PreserveReexecuteState preexecs(this);
+    _sp += nargs;
+    jvms()->set_should_reexecute(true);
+
+    array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
+    original          = do_null_check(original, T_OBJECT);
+
+    // Check if a null path was taken unconditionally.
+    if (stopped())  return true;
+
+    Node* orig_length = load_array_length(original);
+
+    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0,
+                                              NULL, 0);
+    klass_node = do_null_check(klass_node, T_OBJECT);
+
+    RegionNode* bailout = new (C, 1) RegionNode(1);
+    record_for_igvn(bailout);

-  // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
-  // Bail out if that is so.
-  Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
-  if (not_objArray != NULL) {
-    // Improve the klass node's type from the new optimistic assumption:
-    ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
-    const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
-    Node* cast = new (C, 2) CastPPNode(klass_node, akls);
-    cast->init_req(0, control());
-    klass_node = _gvn.transform(cast);
-  }
+    // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
+    // Bail out if that is so.
+    Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
+    if (not_objArray != NULL) {
+      // Improve the klass node's type from the new optimistic assumption:
+      ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
+      const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
+      Node* cast = new (C, 2) CastPPNode(klass_node, akls);
+      cast->init_req(0, control());
+      klass_node = _gvn.transform(cast);
+    }

-  // Bail out if either start or end is negative.
-  generate_negative_guard(start, bailout, &start);
-  generate_negative_guard(end,   bailout, &end);
+    // Bail out if either start or end is negative.
+    generate_negative_guard(start, bailout, &start);
+    generate_negative_guard(end,   bailout, &end);

-  Node* length = end;
-  if (_gvn.type(start) != TypeInt::ZERO) {
-    length = _gvn.transform( new (C, 3) SubINode(end, start) );
-  }
+    Node* length = end;
+    if (_gvn.type(start) != TypeInt::ZERO) {
+      length = _gvn.transform( new (C, 3) SubINode(end, start) );
+    }

-  // Bail out if length is negative.
-  // ...Not needed, since the new_array will throw the right exception.
-  //generate_negative_guard(length, bailout, &length);
+    // Bail out if length is negative.
+    // ...Not needed, since the new_array will throw the right exception.
+    //generate_negative_guard(length, bailout, &length);

-  if (bailout->req() > 1) {
-    PreserveJVMState pjvms(this);
-    set_control( _gvn.transform(bailout) );
-    _sp += nargs;  // push the arguments back on the stack
-    uncommon_trap(Deoptimization::Reason_intrinsic,
-                  Deoptimization::Action_maybe_recompile);
-  }
+    if (bailout->req() > 1) {
+      PreserveJVMState pjvms(this);
+      set_control( _gvn.transform(bailout) );
+      uncommon_trap(Deoptimization::Reason_intrinsic,
+                    Deoptimization::Action_maybe_recompile);
+    }

-  if (!stopped()) {
-    // How many elements will we copy from the original?
-    // The answer is MinI(orig_length - start, length).
-    Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
-    Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
+    if (!stopped()) {
+      // How many elements will we copy from the original?
+      // The answer is MinI(orig_length - start, length).
+      Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
+      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);

-    const bool raw_mem_only = true;
-    Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only);
+      const bool raw_mem_only = true;
+      newcopy = new_array(klass_node, length, 0, raw_mem_only);

-    // Generate a direct call to the right arraycopy function(s).
-    // We know the copy is disjoint but we might not know if the
-    // oop stores need checking.
-    // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
-    // This will fail a store-check if x contains any non-nulls.
-    bool disjoint_bases = true;
-    bool length_never_negative = true;
-    generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
-                       original, start, newcopy, intcon(0), moved,
-                       disjoint_bases, length_never_negative);
-
-    push(newcopy);
-  }
+      // Generate a direct call to the right arraycopy function(s).
+      // We know the copy is disjoint but we might not know if the
+      // oop stores need checking.
+      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
+      // This will fail a store-check if x contains any non-nulls.
+      bool disjoint_bases = true;
+      bool length_never_negative = true;
+      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
+                         original, start, newcopy, intcon(0), moved,
+                         disjoint_bases, length_never_negative);
+    }
+  } //original reexecute and sp are set back here
+
+  if(!stopped()) {
+    push(newcopy);
+  }
@@ -3992,146 +3999,159 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
 //
 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
   int nargs = 1;
-  Node* obj = null_check_receiver(callee());
-  if (stopped())  return true;
-  Node* obj_klass = load_object_klass(obj);
-  const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
-  const TypeOopPtr*   toop   = ((tklass != NULL)
+  PhiNode* result_val;
+
+  //set the original stack and the reexecute bit for the interpreter to reexecute
+  //the bytecode that invokes Object.clone if deoptimization happens
+  { PreserveReexecuteState preexecs(this);
+    jvms()->set_should_reexecute(true);
+
+    //null_check_receiver will adjust _sp (push and pop)
+    Node* obj = null_check_receiver(callee());
+    if (stopped())  return true;
+    _sp += nargs;
+
+    Node* obj_klass = load_object_klass(obj);
+    const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
+    const TypeOopPtr*   toop   = ((tklass != NULL)
                                 ? tklass->as_instance_type()
                                 : TypeInstPtr::NOTNULL);

     // Conservatively insert a memory barrier on all memory slices.
     // Do not let writes into the original float below the clone.
     insert_mem_bar(Op_MemBarCPUOrder);

     // paths into result_reg:
     enum {
       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
       PATH_LIMIT
     };
     RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
-  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
-                                                      TypeInstPtr::NOTNULL);
+    result_val             = new(C, PATH_LIMIT) PhiNode(result_reg,
+                                                        TypeInstPtr::NOTNULL);
     PhiNode*    result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
     PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
                                                         TypePtr::BOTTOM);
     record_for_igvn(result_reg);

     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
     int raw_adr_idx = Compile::AliasIdxRaw;
     const bool raw_mem_only = true;

     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
     if (array_ctl != NULL) {
       // It's an array.
       PreserveJVMState pjvms(this);
       set_control(array_ctl);
       Node* obj_length = load_array_length(obj);
       Node* obj_size = NULL;
-    Node* alloc_obj = new_array(obj_klass, obj_length, nargs,
-                                raw_mem_only, &obj_size);
+      Node* alloc_obj = new_array(obj_klass, obj_length, 0,
+                                  raw_mem_only, &obj_size);

       if (!use_ReduceInitialCardMarks()) {
         // If it is an oop array, it requires very special treatment,
         // because card marking is required on each card of the array.
         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
         if (is_obja != NULL) {
           PreserveJVMState pjvms2(this);
           set_control(is_obja);
           // Generate a direct call to the right arraycopy function(s).
           bool disjoint_bases = true;
           bool length_never_negative = true;
           generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
                              obj, intcon(0), alloc_obj, intcon(0),
                              obj_length,
                              disjoint_bases, length_never_negative);
           result_reg->init_req(_objArray_path, control());
           result_val->init_req(_objArray_path, alloc_obj);
           result_i_o ->set_req(_objArray_path, i_o());
           result_mem ->set_req(_objArray_path, reset_memory());
         }
       }
       // We can dispense with card marks if we know the allocation
       // comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
       // causes the non-eden paths to simulate a fresh allocation,
       // insofar that no further card marks are required to initialize
       // the object.
       // Otherwise, there are no card marks to worry about.

       if (!stopped()) {
         copy_to_clone(obj, alloc_obj, obj_size, true, false);

         // Present the results of the copy.
         result_reg->init_req(_array_path, control());
         result_val->init_req(_array_path, alloc_obj);
         result_i_o ->set_req(_array_path, i_o());
         result_mem ->set_req(_array_path, reset_memory());
       }
     }

     // We only go to the instance fast case code if we pass a number of guards.
     // The paths which do not pass are accumulated in the slow_region.
     RegionNode* slow_region = new (C, 1) RegionNode(1);
     record_for_igvn(slow_region);
     if (!stopped()) {
       // It's an instance (we did array above).  Make the slow-path tests.
       // If this is a virtual call, we generate a funny guard.  We grab
       // the vtable entry corresponding to clone() from the target object.
       // If the target method which we are calling happens to be the
       // Object clone() method, we pass the guard.  We do not need this
       // guard for non-virtual calls; the caller is known to be the native
       // Object clone().
       if (is_virtual) {
         generate_virtual_guard(obj_klass, slow_region);
       }

       // The object must be cloneable and must not have a finalizer.
       // Both of these conditions may be checked in a single test.
       // We could optimize the cloneable test further, but we don't care.
       generate_access_flags_guard(obj_klass,
                                   // Test both conditions:
                                   JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
                                   // Must be cloneable but not finalizer:
                                   JVM_ACC_IS_CLONEABLE,
                                   slow_region);
     }

     if (!stopped()) {
       // It's an instance, and it passed the slow-path tests.
       PreserveJVMState pjvms(this);
       Node* obj_size = NULL;
       Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);

       copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());

       // Present the results of the slow call.
       result_reg->init_req(_instance_path, control());
       result_val->init_req(_instance_path, alloc_obj);
       result_i_o ->set_req(_instance_path, i_o());
       result_mem ->set_req(_instance_path, reset_memory());
     }

     // Generate code for the slow case.  We make a call to clone().
     set_control(_gvn.transform(slow_region));
     if (!stopped()) {
       PreserveJVMState pjvms(this);
       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
       Node* slow_result = set_results_for_java_call(slow_call);
       // this->control() comes from set_results_for_java_call
       result_reg->init_req(_slow_path, control());
       result_val->init_req(_slow_path, slow_result);
       result_i_o ->set_req(_slow_path, i_o());
       result_mem ->set_req(_slow_path, reset_memory());
     }

     // Return the combined state.
     set_control( _gvn.transform(result_reg) );
     set_i_o(     _gvn.transform(result_i_o) );
     set_all_memory( _gvn.transform(result_mem) );
+  } //original reexecute and sp are set back here

   push(_gvn.transform(result_val));
...
@@ -608,16 +608,14 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   }

   // Are we masking a long that was converted from an int with a mask
-  // that fits in 32-bits?  Commute them and use an AndINode.
-  if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF00000000)) == 0) {
-    // If we are doing an UI2L conversion (i.e. the mask is
-    // 0x00000000FFFFFFFF) we cannot convert the AndL to an AndI
-    // because the AndI would be optimized away later in Identity.
-    if (mask != CONST64(0x00000000FFFFFFFF)) {
-      Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask));
-      andi = phase->transform(andi);
-      return new (phase->C, 2) ConvI2LNode(andi);
-    }
+  // that fits in 32-bits?  Commute them and use an AndINode.  Don't
+  // convert masks which would cause a sign extension of the integer
+  // value.  This check includes UI2L masks (0x00000000FFFFFFFF) which
+  // would be optimized away later in Identity.
+  if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) {
+    Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask));
+    andi = phase->transform(andi);
+    return new (phase->C, 2) ConvI2LNode(andi);
   }

   // Masking off sign bits?  Dont make them!
...
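The tightened mask test (0xFFFFFFFF80000000 instead of 0xFFFFFFFF00000000) is what bug 6863155 is about: commuting to an AndI is only safe when bit 31 is cleared by the mask, otherwise the later ConvI2L sign-extends that bit into the high word. A standalone check of the two shapes (plain C++, not HotSpot code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x    = (int32_t)0xc2000000;   // e.g. ((byte)0xc2) << 24
  int64_t mask = 0xff000000LL;          // keeps bit 31 => unsafe to commute

  int64_t and_l    = (int64_t)x & mask;             // AndL(ConvI2L(x), mask)
  int64_t commuted = (int64_t)(x & (int32_t)mask);  // ConvI2L(AndI(x, mask))

  assert(and_l    == 0x00000000c2000000LL);             // correct result
  assert(commuted == (int64_t)0xffffffffc2000000ULL);   // sign extension leaked in
}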
@@ -911,8 +911,9 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
       ciMethod* scope_method = method ? method : _method;
       // Describe the scope here
       assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
+      assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
       // Now we can describe the scope.
-      debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),locvals,expvals,monvals);
+      debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),jvms->should_reexecute(),locvals,expvals,monvals);
     } // End jvms loop

     // Mark the end of the scope set.
@@ -994,7 +995,8 @@ void NonSafepointEmitter::emit_non_safepoint() {
   for (int depth = 1; depth <= max_depth; depth++) {
     JVMState* jvms = youngest_jvms->of_depth(depth);
     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
-    debug_info->describe_scope(pc_offset, method, jvms->bci());
+    assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
+    debug_info->describe_scope(pc_offset, method, jvms->bci(), jvms->should_reexecute());
   }

   // Mark the end of the scope set.
...
@@ -39,6 +39,8 @@ class Atomic : AllStatic {
   static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
   static void store_ptr(void*    store_value, volatile void*     dest);

+  static jlong load(volatile jlong* src);
+
   // Atomically add to a location, return updated value
   static jint     add    (jint     add_value, volatile jint*     dest);
   static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
...
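The new Atomic::load(volatile jlong*) exists so 32-bit platforms can read a shared 64-bit timestamp without tearing; the Solaris x86 fix for 6863420 (its test appears below) relies on an atomic read followed by a CAS loop over the last returned time. A sketch of that pattern in portable modern C++, with std::atomic standing in for HotSpot's Atomic class:

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<int64_t> last_time{0};   // shared "latest time handed out"

int64_t monotonic(int64_t raw_now) {
  int64_t prev = last_time.load();                   // Atomic::load counterpart
  while (true) {
    int64_t now = raw_now > prev ? raw_now : prev;   // never run backward
    if (last_time.compare_exchange_weak(prev, now))  // CAS; refreshes prev on failure
      return now;
  }
}

int main() {
  assert(monotonic(100) == 100);
  assert(monotonic(90)  == 100);   // a backward raw reading is clamped
}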
@@ -402,7 +402,12 @@ inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
   DebugInfoReadStream buffer(nm(), decode_offset);
   _sender_decode_offset = buffer.read_int();
   _method               = methodOop(buffer.read_oop());
-  _bci                  = buffer.read_bci();
+  // Deoptimization needs reexecute bit to determine whether to reexecute the bytecode
+  // only at the time when it "unpack_frames", and the reexecute bit info could always
+  // be obtained from the scopeDesc in the compiledVFrame. As a result, we don't keep
+  // the reexecute bit here.
+  bool dummy_reexecute;
+  _bci                  = buffer.read_bci_and_reexecute(dummy_reexecute);

   assert(_method->is_method(), "checking type of decoded method");
 }
...
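read_bci_and_reexecute is the decoding side of describe_scope's new reexecute argument: the flag rides along with the bci in the debug-info stream. The exact packing lives in the DebugInfo code; the sketch below shows one plausible encoding, with hypothetical encode/decode helpers assumed here purely for illustration:

#include <cassert>

// Hypothetical packing: reexecute bit in the low bit, bci in the rest.
static int encode(int bci, bool reexecute) { return (bci << 1) | (reexecute ? 1 : 0); }
static int decode(int value, bool& reexecute) {
  reexecute = (value & 1) != 0;
  return value >> 1;
}

int main() {
  bool re;
  int v = encode(42, true);
  assert(decode(v, re) == 42 && re);   // both pieces survive the round trip
}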
@@ -44,6 +44,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {

   _method = vf->method();
   _bci    = vf->raw_bci();
+  _reexecute = vf->should_reexecute();

   int index;
@@ -148,16 +149,20 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
   // C++ interpreter doesn't need a pc since it will figure out what to do when it
   // begins execution
   address pc;
-  bool use_next_mdp; // true if we should use the mdp associated with the next bci
-                     // rather than the one associated with bcp
+  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
+                             // rather than the one associated with bcp
   if (raw_bci() == SynchronizationEntryBCI) {
     // We are deoptimizing while hanging in prologue code for synchronized method
     bcp = method()->bcp_from(0); // first byte code
     pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
-    use_next_mdp = false;
+  } else if (should_reexecute()) { //reexecute this bytecode
+    assert(is_top_frame, "reexecute allowed only for the top frame");
+    bcp = method()->bcp_from(bci());
+    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
   } else {
     bcp = method()->bcp_from(bci());
-    pc  = Interpreter::continuation_for(method(), bcp, callee_parameters, is_top_frame, use_next_mdp);
+    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
+    use_next_mdp = true;
   }
   assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");
...
@@ -41,7 +41,8 @@ class vframeArrayElement : public _ValueObj {
   private:

     frame _frame;                // the interpreter frame we will unpack into
     int   _bci;                  // raw bci for this vframe
+    bool  _reexecute;            // whether we should reexecute this bytecode
     methodOop     _method;       // the method for this vframe
     MonitorChunk* _monitors;     // active monitors for this vframe
     StackValueCollection* _locals;
@@ -54,6 +55,7 @@ class vframeArrayElement : public _ValueObj {

     int bci(void) const;
     int raw_bci(void) const            { return _bci; }
+    bool should_reexecute(void) const  { return _reexecute; }
     methodOop method(void) const       { return _method; }
...
@@ -276,6 +276,15 @@ int compiledVFrame::raw_bci() const {
   return scope()->bci();
 }

+bool compiledVFrame::should_reexecute() const {
+  if (scope() == NULL) {
+    // native nmethods have no scope; the method/bci is implied
+    nmethod* nm = code();
+    assert(nm->is_native_method(), "must be native");
+    return false;
+  }
+  return scope()->should_reexecute();
+}

 vframe* compiledVFrame::sender() const {
   const frame f = fr();
...
@@ -25,11 +25,12 @@
 class compiledVFrame: public javaVFrame {
  public:
   // JVM state
   methodOop                    method() const;
   int                          bci() const;
+  bool                         should_reexecute() const;
   StackValueCollection*        locals() const;
   StackValueCollection*        expressions() const;
   GrowableArray<MonitorInfo*>* monitors() const;

   void set_locals(StackValueCollection* values) const;
...
@@ -27,7 +27,7 @@
  * @bug 6826736
  * @summary CMS: core dump with -XX:+UseCompressedOops
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
+ * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
  */
 public class Test {
...
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/**
* @test
* @bug 6833129
* @summary Object.clone() and Arrays.copyOf ignore coping with -XX:+DeoptimizeALot
* @run main/othervm -Xbatch -XX:+DeoptimizeALot Test
*/
public class Test{
public static void init(int src[]) {
for (int i =0; i<src.length; i++) {
src[i] = i;
}
}
public static void clone_and_verify(int src[]) {
for (int i = 0; i < src.length; i++) {
int [] src_clone = src.clone();
if (src[i] != src_clone[i]) {
System.out.println("Error: allocated but not copied: ");
for( int j =0; j < src_clone.length; j++)
System.out.print(" " + src_clone[j]);
System.out.println();
System.exit(97);
}
}
}
public static void test() {
int[] src = new int[34];
init(src);
clone_and_verify(src);
}
public static void main(String[] args) {
for (int i=0; i< 20000; i++) {
test();
}
}
}
@@ -27,7 +27,7 @@
  * @bug 6851282
  * @summary JIT miscompilation results in null entry in array when using CompressedOops
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops Test
+ * @run main/othervm/timeout=600 -Xmx256m -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops Test
  */

 import java.util.ArrayList;
...
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/**
* @test
* @bug 6863155
* @summary Server compiler generates incorrect code (x86, long, bitshift, bitmask)
*
* @run main/othervm -Xcomp -XX:CompileOnly=Test6863155.test Test6863155
*/
public class Test6863155 {
private static long test(byte b) {
return b << 24 & 0xff000000L;
}
public static void main(String... args) {
long result = test((byte) 0xc2);
long expected = 0x00000000c2000000L;
if (result != expected)
throw new InternalError(Long.toHexString(result) + " != " + Long.toHexString(expected));
}
}
/*
* Copyright 2009 D.E. Shaw. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6863420
* @summary os::javaTimeNanos() go backward on Solaris x86
*
* @run main/othervm Test
*/
public class Test {
static long value = 0;
static boolean got_backward_time = false;
public static void main(String args[]) {
final int count = 100000;
for (int numThreads = 1; numThreads <= 32; numThreads++) {
final int numRuns = 1;
for (int t=1; t <= numRuns; t++) {
final int curRun = t;
System.out.println("Spawning " + numThreads + " threads");
final Thread threads[] = new Thread[numThreads];
for (int i = 0; i < threads.length; i++) {
Runnable thread =
new Runnable() {
public void run() {
for (long l = 0; l < 100000; l++) {
final long start = System.nanoTime();
if (value == 12345678) {
System.out.println("Wow!");
}
final long end = System.nanoTime();
final long time = end - start;
value += time;
if (time < 0) {
System.out.println(
"Backwards: " +
"start=" + start + " " +
"end=" + end + " " +
"time= " + time
);
got_backward_time = true;
}
}
}
};
threads[i] = new Thread(thread, "Thread" + i);
}
for (int i = 0; i < threads.length; i++) {
threads[i].start();
}
for (int i = 0; i < threads.length; i++) {
try {
threads[i].join();
}
catch (InterruptedException e) {
continue;
}
}
}
}
if (got_backward_time) {
System.exit(97);
}
}
}
/*
* Copyright 2009 Goldman Sachs International. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/*
* @test
* @bug 6865031
* @summary Application gives bad result (throws bad exception) with compressed oops
* @run main/othervm -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:-LoopUnswitching -XX:CompileCommand=inline,AbstractMemoryEfficientList.equals Test hello goodbye
*/
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
interface MyList {
public int size();
public Object set(final int index, final Object element);
public Object get(final int index);
}
abstract class AbstractMemoryEfficientList implements MyList {
abstract public int size();
abstract public Object get(final int index);
abstract public Object set(final int index, final Object element);
public boolean equals(Object o) {
if (o == this) {
return true;
}
if (!(o instanceof MyList)) {
return false;
}
final MyList that = (MyList) o;
if (this.size() != that.size()) {
return false;
}
for (int i = 0; i < this.size(); i++) {
try {
if (!((this.get(i)).equals(that.get(i)))) {
return false;
}
} catch (IndexOutOfBoundsException e) {
System.out.println("THROWING RT EXC");
System.out.println("concurrent modification of this:" + this.getClass() + ":" + System.identityHashCode(this) + "; that:" + that.getClass() + ":" + System.identityHashCode(that) + "; i:" + i);
e.printStackTrace();
System.exit(97);
throw new RuntimeException("concurrent modification of this:" + this.getClass() + ":" + System.identityHashCode(this) + "; that:" + that.getClass() + ":" + System.identityHashCode(that) + "; i:" + i, e);
}
}
return true;
}
public int hashCode() {
int hashCode = 1;
for (int i = 0; i < this.size(); i++) {
Object obj = this.get(i);
hashCode = 31 * hashCode + (obj == null ? 0 : obj.hashCode());
}
return hashCode;
}
}
final class SingletonList extends AbstractMemoryEfficientList {
private Object element1;
SingletonList(final Object obj1) {
super();
this.element1 = obj1;
}
public int size() {
return 1;
}
public Object get(final int index) {
if (index == 0) {
return this.element1;
} else {
throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + this.size());
}
}
public Object set(final int index, final Object element) {
if (index == 0) {
final Object previousElement = this.element1;
this.element1 = element;
return previousElement;
} else {
throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + this.size());
}
}
}
final class DoubletonList extends AbstractMemoryEfficientList {
private Object element1;
private Object element2;
DoubletonList(final Object obj1, final Object obj2) {
this.element1 = obj1;
this.element2 = obj2;
}
public int size() {
return 2;
}
public Object get(final int index) {
switch (index) {
case 0 : return this.element1;
case 1 : return this.element2;
default: throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + this.size());
}
}
public Object set(final int index, final Object element) {
switch (index) {
case 0 :
{
final Object previousElement = this.element1;
this.element1 = element;
return previousElement;
}
case 1 :
{
final Object previousElement = this.element2;
this.element2 = element;
return previousElement;
}
default : throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + this.size());
}
}
}
class WeakPool<V> {
protected static final int DEFAULT_INITIAL_CAPACITY = 16;
private static final int MAXIMUM_CAPACITY = 1 << 30;
private static final float DEFAULT_LOAD_FACTOR = 0.75f;
protected Entry<V>[] table;
private int size;
protected int threshold;
private final float loadFactor;
private final ReferenceQueue<V> queue = new ReferenceQueue<V>();
public WeakPool()
{
this.loadFactor = DEFAULT_LOAD_FACTOR;
threshold = DEFAULT_INITIAL_CAPACITY;
table = new Entry[DEFAULT_INITIAL_CAPACITY];
}
/**
* Check for equality of non-null reference x and possibly-null y. By
* default uses Object.equals.
*/
private boolean eq(Object x, Object y)
{
return x == y || x.equals(y);
}
/**
* Return index for hash code h.
*/
private int indexFor(int h, int length)
{
return h & length - 1;
}
/**
* Expunge stale entries from the table.
*/
private void expungeStaleEntries()
{
Object r;
while ((r = queue.poll()) != null)
{
Entry e = (Entry) r;
int h = e.hash;
int i = indexFor(h, table.length);
// System.out.println("EXPUNGING " + h);
Entry<V> prev = table[i];
Entry<V> p = prev;
while (p != null)
{
Entry<V> next = p.next;
if (p == e)
{
if (prev == e)
{
table[i] = next;
}
else
{
prev.next = next;
}
e.next = null; // Help GC
size--;
break;
}
prev = p;
p = next;
}
}
}
/**
* Return the table after first expunging stale entries
*/
private Entry<V>[] getTable()
{
expungeStaleEntries();
return table;
}
/**
* Returns the number of key-value mappings in this map.
* This result is a snapshot, and may not reflect unprocessed
* entries that will be removed before next attempted access
* because they are no longer referenced.
*/
public int size()
{
if (size == 0)
{
return 0;
}
expungeStaleEntries();
return size;
}
/**
* Returns <tt>true</tt> if this map contains no key-value mappings.
* This result is a snapshot, and may not reflect unprocessed
* entries that will be removed before next attempted access
* because they are no longer referenced.
*/
public boolean isEmpty()
{
return size() == 0;
}
/**
* Returns the value stored in the pool that equals the requested key
* or <tt>null</tt> if the map contains no mapping for
* this key (or the key is null)
*
* @param key the key whose equals value is to be returned.
* @return the object that is equal the specified key, or
* <tt>null</tt> if key is null or no object in the pool equals the key.
*/
public V get(V key)
{
if (key == null)
{
return null;
}
int h = key.hashCode();
Entry<V>[] tab = getTable();
int index = indexFor(h, tab.length);
Entry<V> e = tab[index];
while (e != null)
{
V candidate = e.get();
if (e.hash == h && eq(key, candidate))
{
return candidate;
}
e = e.next;
}
return null;
}
/**
* Returns the entry associated with the specified key in the HashMap.
* Returns null if the HashMap contains no mapping for this key.
*/
Entry getEntry(Object key)
{
int h = key.hashCode();
Entry[] tab = getTable();
int index = indexFor(h, tab.length);
Entry e = tab[index];
while (e != null && !(e.hash == h && eq(key, e.get())))
{
e = e.next;
}
return e;
}
/**
* Places the object into the pool. If the object is null, nothing happens.
* If an equal object already exists, it is not replaced.
*
* @param key the object to put into the pool. key may be null.
* @return the object in the pool that is equal to the key, or the newly placed key if no such object existed when put was called
*/
public V put(V key)
{
if (key == null)
{
return null;
}
int h = key.hashCode();
Entry<V>[] tab = getTable();
int i = indexFor(h, tab.length);
for (Entry<V> e = tab[i]; e != null; e = e.next)
{
V candidate = e.get();
if (h == e.hash && eq(key, candidate))
{
return candidate;
}
}
tab[i] = new Entry<V>(key, queue, h, tab[i]);
if (++size >= threshold)
{
resize(tab.length * 2);
}
// System.out.println("Added " + key + " to pool");
return key;
}
/**
* Rehashes the contents of this map into a new array with a
* larger capacity. This method is called automatically when the
* number of keys in this map reaches its threshold.
* <p/>
* If current capacity is MAXIMUM_CAPACITY, this method does not
* resize the map, but sets threshold to Integer.MAX_VALUE.
* This has the effect of preventing future calls.
*
* @param newCapacity the new capacity, MUST be a power of two;
* must be greater than current capacity unless current
* capacity is MAXIMUM_CAPACITY (in which case value
* is irrelevant).
*/
void resize(int newCapacity)
{
Entry<V>[] oldTable = getTable();
int oldCapacity = oldTable.length;
if (oldCapacity == MAXIMUM_CAPACITY)
{
threshold = Integer.MAX_VALUE;
return;
}
Entry<V>[] newTable = new Entry[newCapacity];
transfer(oldTable, newTable);
table = newTable;
/*
* If ignoring null elements and processing ref queue caused massive
* shrinkage, then restore old table. This should be rare, but avoids
* unbounded expansion of garbage-filled tables.
*/
if (size >= threshold / 2)
{
threshold = (int) (newCapacity * loadFactor);
}
else
{
expungeStaleEntries();
transfer(newTable, oldTable);
table = oldTable;
}
}
/**
* Transfer all entries from src to dest tables
*/
private void transfer(Entry[] src, Entry[] dest)
{
for (int j = 0; j < src.length; ++j)
{
Entry e = src[j];
src[j] = null;
while (e != null)
{
Entry next = e.next;
Object key = e.get();
if (key == null)
{
e.next = null; // Help GC
size--;
}
else
{
int i = indexFor(e.hash, dest.length);
e.next = dest[i];
dest[i] = e;
}
e = next;
}
}
}
/**
* Removes the object in the pool that equals the key.
*
* @param key
* @return previous value associated with specified key, or <tt>null</tt>
* if there was no mapping for key or the key is null.
*/
public V removeFromPool(V key)
{
if (key == null)
{
return null;
}
int h = key.hashCode();
Entry<V>[] tab = getTable();
int i = indexFor(h, tab.length);
Entry<V> prev = tab[i];
Entry<V> e = prev;
while (e != null)
{
Entry<V> next = e.next;
V candidate = e.get();
if (h == e.hash && eq(key, candidate))
{
size--;
if (prev == e)
{
tab[i] = next;
}
else
{
prev.next = next;
}
return candidate;
}
prev = e;
e = next;
}
return null;
}
/**
* Removes all mappings from this map.
*/
public void clear()
{
// clear out ref queue. We don't need to expunge entries
// since table is getting cleared.
while (queue.poll() != null)
{
// nop
}
table = new Entry[DEFAULT_INITIAL_CAPACITY];
threshold = DEFAULT_INITIAL_CAPACITY;
size = 0;
// Allocation of array may have caused GC, which may have caused
// additional entries to go stale. Removing these entries from the
// reference queue will make them eligible for reclamation.
while (queue.poll() != null)
{
// nop
}
}
/**
* The entries in this hash table extend WeakReference, using its main ref
* field as the key.
*/
protected static class Entry<V>
extends WeakReference<V>
{
private final int hash;
private Entry<V> next;
/**
* Create new entry.
*/
Entry(final V key, final ReferenceQueue<V> queue, final int hash, final Entry<V> next)
{
super(key, queue);
this.hash = hash;
this.next = next;
}
public V getKey()
{
return super.get();
}
public boolean equals(Object o)
{
if (!(o instanceof WeakPool.Entry))
{
return false;
}
WeakPool.Entry<V> that = (WeakPool.Entry<V>) o;
V k1 = this.getKey();
V k2 = that.getKey();
return (k1==k2 || k1.equals(k2));
}
public int hashCode()
{
return this.hash;
}
public String toString()
{
return String.valueOf(this.getKey());
}
}
}
final class MultiSynonymKey {
private List<MyList> keys;
public MultiSynonymKey() {
keys = new ArrayList<MyList>();
}
public MultiSynonymKey(MyList... arg) {
keys = Arrays.asList(arg);
}
public List<MyList> getKeys() {
return keys;
}
public int hashCode() {
return this.getKeys().hashCode();
}
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof MultiSynonymKey)) {
return false;
}
MultiSynonymKey that = (MultiSynonymKey) obj;
return this.getKeys().equals(that.getKeys());
}
public String toString() {
return this.getClass().getName() + this.getKeys().toString();
}
}
public class Test extends Thread {
static public Test test;
static private byte[] arg1;
static private byte[] arg2;
static public WeakPool<MultiSynonymKey> wp;
public volatile MultiSynonymKey ml1;
public volatile MultiSynonymKey ml2;
private volatile MultiSynonymKey ml3;
public void run() {
int count=0;
while (true) {
try {
Thread.sleep(10);
} catch (Exception e) {}
synchronized (wp) {
ml2 = new MultiSynonymKey(new DoubletonList(new String(arg1), new String(arg2)));
wp.put(ml2);
ml3 = new MultiSynonymKey(new DoubletonList(new String(arg1), new String(arg2)));
}
try {
Thread.sleep(10);
} catch (Exception e) {}
synchronized (wp) {
ml1 = new MultiSynonymKey(new SingletonList(new String(arg1)));
wp.put(ml1);
ml3 = new MultiSynonymKey(new SingletonList(new String(arg1)));
}
if (count++==100)
System.exit(95);
}
}
public static void main(String[] args) throws Exception {
wp = new WeakPool<MultiSynonymKey>();
test = new Test();
test.arg1 = args[0].getBytes();
test.arg2 = args[1].getBytes();
test.ml1 = new MultiSynonymKey(new SingletonList(new String(test.arg1)));
test.ml2 = new MultiSynonymKey(new DoubletonList(new String(test.arg1), new String(test.arg2)));
test.ml3 = new MultiSynonymKey(new DoubletonList(new String(test.arg1), new String(test.arg2)));
wp.put(test.ml1);
wp.put(test.ml2);
test.setDaemon(true);
test.start();
int counter = 0;
while (true) {
synchronized (wp) {
MultiSynonymKey foo = test.ml3;
if (wp.put(foo) == foo) {
// System.out.println("foo " + counter);
// System.out.println(foo);
}
}
counter++;
}
}
private boolean eq(Object x, Object y) {
return x == y || x.equals(y);
}
}