Commit c41f27ce authored by goetz

8019972: PPC64 (part 9): platform files for interpreter only VM.

Summary: With this change the HotSpot core build works on Linux/PPC64. The VM successfully executes simple test programs.
Reviewed-by: kvn
Parent 2688654c
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
// Platform specific for C++ based Interpreter
#define LOTS_OF_REGS /* Lets interpreter use plenty of registers */
private:
// Save the bottom of the stack after frame manager setup, so it can easily be
// restored after returning from a recursive interpreter call.
intptr_t* _frame_bottom; // Saved bottom of frame manager frame.
address _last_Java_pc; // Pc to return to in frame manager.
intptr_t* _last_Java_fp; // frame pointer
intptr_t* _last_Java_sp; // stack pointer
interpreterState _self_link; // Previous interpreter state // sometimes points to self???
double _native_fresult; // Save result of native calls that might return floats.
intptr_t _native_lresult; // Save result of native calls that might return handle/longs.
public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t* last_Java_fp(void) { return _last_Java_fp; }
static ByteSize native_lresult_offset() {
return byte_offset_of(BytecodeInterpreter, _native_lresult);
}
static ByteSize native_fresult_offset() {
return byte_offset_of(BytecodeInterpreter, _native_fresult);
}
static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp);
#define SET_LAST_JAVA_FRAME() THREAD->frame_anchor()->set(istate->_last_Java_sp, istate->_last_Java_pc);
#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->clear();
// Macros for accessing the stack.
#undef STACK_INT
#undef STACK_FLOAT
#undef STACK_ADDR
#undef STACK_OBJECT
#undef STACK_DOUBLE
#undef STACK_LONG
// JavaStack Implementation
#define STACK_SLOT(offset) ((address) &topOfStack[-(offset)])
#define STACK_INT(offset) (*((jint*) &topOfStack[-(offset)]))
#define STACK_FLOAT(offset) (*((jfloat *) &topOfStack[-(offset)]))
#define STACK_OBJECT(offset) (*((oop *) &topOfStack [-(offset)]))
#define STACK_DOUBLE(offset) (((VMJavaVal64*) &topOfStack[-(offset)])->d)
#define STACK_LONG(offset) (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
#define SET_STACK_SLOT(value, offset) (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
#define SET_STACK_ADDR(value, offset) (*((address *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_INT(value, offset) (*((jint *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_FLOAT(value, offset) (*((jfloat *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = \
((VMJavaVal64*)(addr))->d)
#define SET_STACK_LONG(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
#define SET_STACK_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = \
((VMJavaVal64*)(addr))->l)
// JavaLocals implementation
#define LOCALS_SLOT(offset) ((intptr_t*)&locals[-(offset)])
#define LOCALS_ADDR(offset) ((address)locals[-(offset)])
#define LOCALS_INT(offset) (*(jint*)&(locals[-(offset)]))
#define LOCALS_OBJECT(offset) ((oop)locals[-(offset)])
#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
#define SET_LOCALS_SLOT(value, offset) (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
#define SET_LOCALS_INT(value, offset) (*((jint *)&locals[-(offset)]) = (value))
#define SET_LOCALS_DOUBLE(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
#define SET_LOCALS_LONG(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
                                                   ((VMJavaVal64*)(addr))->d)
#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
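For orientation, here is a minimal usage sketch of the expression-stack macros above. It is not part of this commit; it follows the pattern of the shared C++ interpreter loop in bytecodeInterpreter.cpp, and CASE, UPDATE_PC_AND_TOS_AND_CONTINUE and topOfStack are assumed from that shared file.
// iadd: the two operands are the topmost stack slots (offsets -2 and -1 per the
// macros above); the sum overwrites the lower slot and the stack shrinks by one.
CASE(_iadd):
    SET_STACK_INT(VMintAdd(STACK_INT(-2), STACK_INT(-1)), -2);
    UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);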
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
#ifdef CC_INTERP
// Inline interpreter functions for ppc.
#include <math.h>
inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return (jfloat)fmod((double)op1, (double)op2); }
inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
return ( op1 < op2 ? -1 :
op1 > op2 ? 1 :
op1 == op2 ? 0 :
(direction == -1 || direction == 1) ? direction : 0);
}
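// Note on the 'direction' argument (illustration only, not part of this commit):
// when either operand is NaN, none of the ordered comparisons above hold, so the
// final branch returns 'direction' itself. This matches the JVM spec, where fcmpl
// pushes -1 and fcmpg pushes +1 for an unordered compare. For example:
//   VMfloatCompare(NaN,  1.0f, -1) == -1   // fcmpl semantics
//   VMfloatCompare(NaN,  1.0f, +1) == +1   // fcmpg semantics
//   VMfloatCompare(2.0f, 1.0f, -1) ==  1   // ordinary ordered compare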
inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
to[0] = from[0]; to[1] = from[1];
}
// The long operations depend on compiler support for "long long" on ppc.
inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
return op1 + op2;
}
inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
return op1 & op2;
}
inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
if (op1 == min_jlong && op2 == -1) return op1;
return op1 / op2;
}
inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
return op1 * op2;
}
inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
return op1 | op2;
}
inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
return op1 - op2;
}
inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
return op1 ^ op2;
}
inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
if (op1 == min_jlong && op2 == -1) return 0;
return op1 % op2;
}
inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
return ((uint64_t) op1) >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
return op1 >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
return op1 << (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
return -op;
}
inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
return ~op;
}
inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
return (op <= 0);
}
inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
return (op >= 0);
}
inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
return (op == 0);
}
inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
return (op1 == op2);
}
inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
return (op1 != op2);
}
inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
return (op1 >= op2);
}
inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
return (op1 <= op2);
}
inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
return (op1 < op2);
}
inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
return (op1 > op2);
}
inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
}
// Long conversions
inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
return (jdouble) val;
}
inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
return (jfloat) val;
}
inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
return (jint) val;
}
// Double Arithmetic
inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
return op1 + op2;
}
inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
return op1 / op2;
}
inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
return op1 * op2;
}
inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
return -op;
}
inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
return fmod(op1, op2);
}
inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
return op1 - op2;
}
inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
return ( op1 < op2 ? -1 :
op1 > op2 ? 1 :
op1 == op2 ? 0 :
(direction == -1 || direction == 1) ? direction : 0);
}
// Double Conversions
inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
return (jfloat) val;
}
// Float Conversions
inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
return (jdouble) op;
}
// Integer Arithmetic
inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
return op1 + op2;
}
inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
return op1 & op2;
}
inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
else return op1 / op2;
}
inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
return op1 * op2;
}
inline jint BytecodeInterpreter::VMintNeg(jint op) {
return -op;
}
inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
return op1 | op2;
}
inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
else return op1 % op2;
}
inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
return op1 << (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
return op1 >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
return op1 - op2;
}
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
return ((juint) op1) >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
return op1 ^ op2;
}
inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
return (jdouble) val;
}
inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
return (jfloat) val;
}
inline jlong BytecodeInterpreter::VMint2Long(jint val) {
return (jlong) val;
}
inline jchar BytecodeInterpreter::VMint2Char(jint val) {
return (jchar) val;
}
inline jshort BytecodeInterpreter::VMint2Short(jint val) {
return (jshort) val;
}
inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
return (jbyte) val;
}
#endif // CC_INTERP
#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
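The min_jlong/min_jint guards in VMlongDiv, VMlongRem, VMintDiv and VMintRem above deserve a word: in two's complement the quotient of the most negative value and -1 is not representable, and the JVM spec requires returning the dividend (for division) or 0 (for remainder) rather than trapping. Below is a standalone sketch of the same guard, outside HotSpot and purely for illustration.
#include <stdint.h>

// Java-style idiv for op2 != 0 (the bytecode dispatch throws ArithmeticException
// for a zero divisor before a VMintDiv-style helper is ever reached).
static inline int32_t java_idiv(int32_t op1, int32_t op2) {
  if (op1 == INT32_MIN && op2 == -1) return op1;  // -2^31 / -1 == -2^31 per the JVM spec
  return op1 / op2;
}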
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
void Bytecodes::pd_initialize() {
// No ppc specific initialization.
}
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODES_PPC_HPP
#define CPU_PPC_VM_BYTECODES_PPC_HPP
// No ppc64 specific bytecodes
#endif // CPU_PPC_VM_BYTECODES_PPC_HPP
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTES_PPC_HPP
#define CPU_PPC_VM_BYTES_PPC_HPP
#include "memory/allocation.hpp"
class Bytes: AllStatic {
public:
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
// PowerPC needs to check for alignment.
// Can I count on address always being a pointer to an unsigned char? Yes.
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
static inline bool is_Java_byte_ordering_different() { return false; }
// Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; }
static inline u8 swap_u8(u8 x) { return x; }
static inline u2 get_native_u2(address p) {
return (intptr_t(p) & 1) == 0
? *(u2*)p
: ( u2(p[0]) << 8 )
| ( u2(p[1]) );
}
static inline u4 get_native_u4(address p) {
switch (intptr_t(p) & 3) {
case 0: return *(u4*)p;
case 2: return ( u4( ((u2*)p)[0] ) << 16 )
| ( u4( ((u2*)p)[1] ) );
default: return ( u4(p[0]) << 24 )
| ( u4(p[1]) << 16 )
| ( u4(p[2]) << 8 )
| u4(p[3]);
}
}
static inline u8 get_native_u8(address p) {
switch (intptr_t(p) & 7) {
case 0: return *(u8*)p;
case 4: return ( u8( ((u4*)p)[0] ) << 32 )
| ( u8( ((u4*)p)[1] ) );
case 2: return ( u8( ((u2*)p)[0] ) << 48 )
| ( u8( ((u2*)p)[1] ) << 32 )
| ( u8( ((u2*)p)[2] ) << 16 )
| ( u8( ((u2*)p)[3] ) );
default: return ( u8(p[0]) << 56 )
| ( u8(p[1]) << 48 )
| ( u8(p[2]) << 40 )
| ( u8(p[3]) << 32 )
| ( u8(p[4]) << 24 )
| ( u8(p[5]) << 16 )
| ( u8(p[6]) << 8 )
| u8(p[7]);
}
}
static inline void put_native_u2(address p, u2 x) {
if ( (intptr_t(p) & 1) == 0 ) { *(u2*)p = x; }
else {
p[0] = x >> 8;
p[1] = x;
}
}
static inline void put_native_u4(address p, u4 x) {
switch ( intptr_t(p) & 3 ) {
case 0: *(u4*)p = x;
break;
case 2: ((u2*)p)[0] = x >> 16;
((u2*)p)[1] = x;
break;
default: ((u1*)p)[0] = x >> 24;
((u1*)p)[1] = x >> 16;
((u1*)p)[2] = x >> 8;
((u1*)p)[3] = x;
break;
}
}
static inline void put_native_u8(address p, u8 x) {
switch ( intptr_t(p) & 7 ) {
case 0: *(u8*)p = x;
break;
case 4: ((u4*)p)[0] = x >> 32;
((u4*)p)[1] = x;
break;
case 2: ((u2*)p)[0] = x >> 48;
((u2*)p)[1] = x >> 32;
((u2*)p)[2] = x >> 16;
((u2*)p)[3] = x;
break;
default: ((u1*)p)[0] = x >> 56;
((u1*)p)[1] = x >> 48;
((u1*)p)[2] = x >> 40;
((u1*)p)[3] = x >> 32;
((u1*)p)[4] = x >> 24;
((u1*)p)[5] = x >> 16;
((u1*)p)[6] = x >> 8;
((u1*)p)[7] = x;
}
}
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
// (no byte-order reversal is needed since Power CPUs are big-endian oriented).
static inline u2 get_Java_u2(address p) { return get_native_u2(p); }
static inline u4 get_Java_u4(address p) { return get_native_u4(p); }
static inline u8 get_Java_u8(address p) { return get_native_u8(p); }
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
};
#endif // CPU_PPC_VM_BYTES_PPC_HPP
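As a usage sketch, not part of this commit: bytecode operands and constant-pool entries are not naturally aligned, which is exactly why callers go through this class instead of dereferencing directly. The helper name and the bcp parameter below are hypothetical.
// Read the 2-byte constant-pool index that follows e.g. an invokevirtual opcode.
// bcp + 1 may be odd, so get_Java_u2 falls back to the byte-wise read above.
static u2 read_cp_index(address bcp) {
  return Bytes::get_Java_u2(bcp + 1);
}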
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CODEBUFFER_PPC_HPP
#define CPU_PPC_VM_CODEBUFFER_PPC_HPP
private:
void pd_initialize() {}
public:
void flush_bundle(bool start_new_bundle) {}
#endif // CPU_PPC_VM_CODEBUFFER_PPC_HPP
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif
// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
if (is_icholder_entry(call->destination())) {
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
}
}
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
return is_icholder_entry(call->destination());
}
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
: _ic_call(call)
{
address ic_call = call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
// Search for the ic_call at the given address.
RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
}
// ----------------------------------------------------------------------------
// A PPC CompiledStaticCall looks like this:
//
// >>>> consts
//
// [call target1]
// [IC cache]
// [call target2]
//
// <<<< consts
// >>>> insts
//
// bl offset16 -+ -+ ??? // How many bits available?
// | |
// <<<< insts | |
// >>>> stubs | |
// | |- trampoline_stub_Reloc
// trampoline stub: | <-+
// r2 = toc |
// r2 = [r2 + offset] | // Load call target1 from const section
// mtctr r2 |
// bctr |- static_stub_Reloc
// comp_to_interp_stub: <---+
// r1 = toc
// ICreg = [r1 + IC_offset] // Load IC from const section
// r1 = [r1 + offset] // Load call target2 from const section
// mtctr r1
// bctr
//
// <<<< stubs
//
// The call instruction in the code either
// - branches directly to a compiled method if offset encodable in instruction
// - branches to the trampoline stub if offset to compiled method not encodable
// - branches to the compiled_to_interp stub if target interpreted
//
// Further there are three relocations from the loads to the constants in
// the constant section.
//
// The use of r1 and r2 in the stubs makes it possible to distinguish them.
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
// Get the mark within main instrs section which is set to the address of the call.
address call_addr = cbuf.insts_mark();
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
// Start the stub.
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
if (stub == NULL) {
Compile::current()->env()->record_out_of_memory_failure();
return;
}
// For java_to_interp stubs we use R11_scratch1 as scratch register
// and in call trampoline stubs we use R12_scratch2. This way we
// can distinguish them (see is_NativeCallTrampolineStub_at()).
Register reg_scratch = R11_scratch1;
// Create a static stub relocation which relates this stub
// with the call instruction at insts_call_instruction_offset in the
// instructions code-section.
__ relocate(static_stub_Relocation::spec(call_addr));
const int stub_start_offset = __ offset();
// Now, create the stub's code:
// - load the TOC
// - load the inline cache oop from the constant pool
// - load the call target from the constant pool
// - call
__ calculate_address_from_global_toc(reg_scratch, __ method_toc());
AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
__ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
if (ReoptimizeCallSequences) {
__ b64_patchable((address)-1, relocInfo::none);
} else {
AddressLiteral a((address)-1);
__ load_const_from_method_toc(reg_scratch, a, reg_scratch);
__ mtctr(reg_scratch);
__ bctr();
}
// FIXME: Assert that the stub can be identified and patched.
// Java_to_interp_stub_size should be good.
assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
"should be good size");
assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
"must not confuse java_to_interp with trampoline stubs");
// End the stub.
__ end_a_stub();
#else
ShouldNotReachHere();
#endif
}
#undef __
// Size of the java_to_interp stub. This does not need to be exact, but it must be
// greater than or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::to_interp_stub_size() {
return 12 * BytesPerInstWord;
}
// Relocation entries for call stub, compiled java to interpreter.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::reloc_to_interp_stub() {
return 5;
}
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
instruction_address(),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_holder->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
method_holder->set_data(0);
jump->set_jump_destination((address)-1);
}
//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT
void CompiledStaticCall::verify() {
// Verify call.
NativeCall::verify();
if (os::is_MP()) {
verify_alignment();
}
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_COPY_PPC_HPP
#define CPU_PPC_VM_COPY_PPC_HPP
#ifndef PPC64
#error "copy currently only implemented for PPC64"
#endif
// Inline functions for memory copy and fill.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default: (void)memcpy(to, from, count * HeapWordSize);
break;
}
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default: while (count-- > 0) {
*to++ = *from++;
}
break;
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
// Template for atomic, element-wise copy.
template <class T>
static void copy_conjoint_atomic(T* from, T* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
*to++ = *from++;
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
*to-- = *from--;
}
}
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
copy_conjoint_atomic<jshort>(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
copy_conjoint_atomic<jint>(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
copy_conjoint_atomic<jlong>(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
copy_conjoint_atomic<oop>(from, to, count);
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
while (count-- > 0) {
*to++ = v;
}
}
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
pd_fill_to_words(tohw, count, value);
}
static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
(void)memset(to, value, count);
}
static void pd_zero_to_words(HeapWord* tohw, size_t count) {
pd_fill_to_words(tohw, count, 0);
}
static void pd_zero_to_bytes(void* to, size_t count) {
(void)memset(to, 0, count);
}
#endif // CPU_PPC_VM_COPY_PPC_HPP
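A brief worked example of the fill-pattern construction in pd_fill_to_words above (illustration only, not part of this commit):
// value = 0xDEADBEEF
// v     = ((julong)0xDEADBEEF << 32) | 0xDEADBEEF  ==  0xDEADBEEFDEADBEEF
// Each 8-byte HeapWord written by the loop therefore carries two copies of the
// 32-bit pattern, which is what a word-sized fill of a jint-typed value requires.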
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
address generate_normal_entry(void);
address generate_native_entry(void);
void lock_method(void);
void unlock_method(void);
void generate_counter_incr(Label& overflow);
void generate_counter_overflow(Label& do_continue);
void generate_more_monitors();
void generate_deopt_handling(Register result_index);
void generate_compute_interpreter_state(Label& exception_return);
#endif // CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
protected:
// Size of the interpreter code. Increase if too small: the interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
const static int InterpreterCodeSize = 12*K;
#endif // CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "runtime/frame.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"
void pd_ps(frame f) {}
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_DEPCHECKER_PPC_HPP
#define CPU_PPC_VM_DEPCHECKER_PPC_HPP
// Nothing to do on ppc64
#endif // CPU_PPC_VM_DEPCHECKER_PPC_HPP
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_DISASSEMBLER_PPC_HPP
#define CPU_PPC_VM_DISASSEMBLER_PPC_HPP
static int pd_instruction_alignment() {
return sizeof(int);
}
static const char* pd_cpu_opts() {
return "ppc64";
}
#endif // CPU_PPC_VM_DISASSEMBLER_PPC_HPP
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
// Inline functions for ppc64 frames:
// Find codeblob and set deopt_state.
inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
assert(pc != NULL, "precondition: must have PC");
_cb = CodeCache::find_blob(pc);
_pc = pc; // Must be set for get_deopt_original_pc()
_fp = (intptr_t*)own_abi()->callers_sp;
// Uses _fp - frame_size; this must be done after _cb and _pc are initialized
// and before get_deopt_original_pc is called.
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
assert(((uint64_t)_sp & 0xf) == 0, "SP must be 16-byte aligned");
}
// Constructors
// Initialize all fields, _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->lr); // also sets _fp and adjusts _unextended_sp
}
inline frame::frame(intptr_t* sp, address pc) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
}
inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp) : _sp(sp), _unextended_sp(unextended_sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
}
// Accessors
// Return unique id for this frame. The id must have a value where we
// can distinguish identity and younger/older relationship. NULL
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const {
// Use the _unextended_sp as the frame's ID. Because we have no
// adapters, but resized compiled frames, some of the new code
// (e.g. JVMTI) wouldn't work if we return the (current) SP of the
// frame.
return _unextended_sp;
}
// Return true if this frame is older (less recent activation) than
// the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
assert(this->id() != NULL && id != NULL, "NULL frame id");
// Stack grows towards smaller addresses on ppc64.
return this->id() > id;
}
inline int frame::frame_size(RegisterMap* map) const {
// Stack grows towards smaller addresses on PPC64: sender is at a higher address.
return sender_sp() - sp();
}
// Return the frame's stack pointer before it has been extended by a
// c2i adapter. This is needed by deoptimization for ignoring c2i adapter
// frames.
inline intptr_t* frame::unextended_sp() const {
return _unextended_sp;
}
// All frames have this field.
inline address frame::sender_pc() const {
return (address)callers_abi()->lr;
}
inline address* frame::sender_pc_addr() const {
return (address*)&(callers_abi()->lr);
}
// All frames have this field.
inline intptr_t* frame::sender_sp() const {
return (intptr_t*)callers_abi();
}
// All frames have this field.
inline intptr_t* frame::link() const {
return (intptr_t*)callers_abi()->callers_sp;
}
inline intptr_t* frame::real_fp() const {
return fp();
}
#ifdef CC_INTERP
inline interpreterState frame::get_interpreterState() const {
return (interpreterState)(((address)callers_abi())
- frame::interpreter_frame_cinterpreterstate_size_in_bytes());
}
inline intptr_t** frame::interpreter_frame_locals_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t**)&istate->_locals;
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t*)&istate->_bcp;
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t*)&istate->_mdx;
}
inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
inline jint frame::interpreter_frame_expression_stack_direction() {
return -1;
}
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
interpreterState istate = get_interpreterState();
return istate->_stack + 1;
}
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}
// monitor elements
// In keeping with the Intel side: end is lower in memory than begin;
// the beginning element is the oldest element.
// Also, begin is one past the last monitor.
inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return get_interpreterState()->monitor_base();
}
inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return (BasicObjectLock*)get_interpreterState()->stack_base();
}
inline int frame::interpreter_frame_cinterpreterstate_size_in_bytes() {
// Size of an interpreter object. Not aligned with frame size.
return round_to(sizeof(BytecodeInterpreter), 8);
}
inline Method** frame::interpreter_frame_method_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_method;
}
// Constant pool cache
inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants; // should really use accessor
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants;
}
#endif // CC_INTERP
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor.
return round_to(BasicObjectLock::size(), // number of stack slots
WordsPerLong); // number of stack slots for a Java long
}
inline int frame::interpreter_frame_monitor_size_in_bytes() {
return frame::interpreter_frame_monitor_size() * wordSize;
}
// entry frames
inline intptr_t* frame::entry_frame_argument_at(int offset) const {
// Since an entry frame always calls the interpreter first, the
// parameters are on the stack and relative to known register in the
// entry frame.
intptr_t* tos = (intptr_t*)get_entry_frame_locals()->arguments_tos_address;
return &tos[offset + 1]; // prepushed tos
}
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
return (JavaCallWrapper**)&get_entry_frame_locals()->call_wrapper_address;
}
inline oop frame::saved_oop_result(RegisterMap* map) const {
return *((oop*)map->location(R3->as_VMReg()));
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
*((oop*)map->location(R3->as_VMReg())) = obj;
}
#endif // CPU_PPC_VM_FRAME_PPC_INLINE_HPP
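A small illustration of the ordering convention behind id() and is_older() above (not part of this commit): since the stack grows towards smaller addresses on PPC64, a numerically larger frame id belongs to an older (outer) activation. The addresses below are made up.
//   caller frame:  id == (intptr_t*)0x00007fff00001000   // higher address, older
//   callee frame:  id == (intptr_t*)0x00007fff00000f00   // lower address, younger
//   callee.is_older(caller.id()) == false
//   caller.is_older(callee.id()) == true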
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
#define CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
// Size of PPC Instructions
const int BytesPerInstWord = 4;
const int StackAlignmentInBytes = 16;
#endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
@@ -3614,7 +3614,7 @@ class CommandLineFlags {
NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \
"Address to allocate shared memory region for class data") \
\
diagnostic(bool, EnableInvokeDynamic, true PPC64_ONLY(&& false), \
diagnostic(bool, EnableInvokeDynamic, true, \
"support JSR 292 (method handles, invokedynamic, " \
"anonymous classes") \
\