Commit cdb0b255 authored by never

5108146: Merge i486 and amd64 cpu directories

6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn
Parent c8ac20b6
This diff is collapsed.
@@ -956,7 +956,8 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
     size->load_item();
     store_stack_parameter (size->result(),
                            in_ByteSize(STACK_BIAS +
-                                       (i + frame::memory_parameter_word_sp_offset) * wordSize));
+                                       frame::memory_parameter_word_sp_offset * wordSize +
+                                       i * sizeof(jint)));
   }
   // This instruction can be deoptimized in the slow path : use
......
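
The repacking above matters because each dimension count is a jint: the old code spent a full word per count, while the new code anchors the block at memory_parameter_word_sp_offset and packs the counts 4 bytes apart. A standalone sketch of the two offset formulas; the constants below are illustrative assumptions, not the real frame_sparc.hpp values:

#include <cstdio>

int main() {
  const int wordSize   = 8;      // assumed: 64-bit SPARC V9 word
  const int STACK_BIAS = 2047;   // assumed V9 stack bias
  const int memory_parameter_word_sp_offset = 23;  // placeholder value
  typedef int jint;

  for (int i = 0; i < 3; i++) {
    int old_off = STACK_BIAS + (i + memory_parameter_word_sp_offset) * wordSize;
    int new_off = STACK_BIAS + memory_parameter_word_sp_offset * wordSize
                             + i * (int)sizeof(jint);
    printf("dim %d: old offset %d, new offset %d\n", i, old_off, new_off);
  }
  return 0;
}
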
@@ -204,3 +204,9 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
   NativeInstruction* ni = nativeInstruction_at(x);
   ni->set_long_at(0, u.l);
 }
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
@@ -465,9 +465,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     case T_LONG:
       assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
-#ifdef COMPILER2
 #ifdef _LP64
-      // Can't be tiered (yet)
       if (int_reg < int_reg_max) {
         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
         regs[i].set2(r->as_VMReg());
@@ -476,11 +474,12 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         stk_reg_pairs += 2;
       }
 #else
+#ifdef COMPILER2
       // For 32-bit build, can't pass longs in O-regs because they become
       // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
       // spare and available.  This convention isn't used by the Sparc ABI or
       // anywhere else. If we're tiered then we don't use G-regs because c1
-      // can't deal with them as a "pair".
+      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
       // G0: zero
       // G1: 1st Long arg
       // G2: global allocated to TLS
@@ -500,7 +499,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
         stk_reg_pairs += 2;
       }
-#endif // _LP64
 #else // COMPILER2
       if (int_reg_pairs + 1 < int_reg_max) {
         if (is_outgoing) {
@@ -514,6 +512,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         stk_reg_pairs += 2;
       }
 #endif // COMPILER2
+#endif // _LP64
       break;

     case T_FLOAT:
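
The point of the reshuffle above is to make _LP64 the outermost test, so the 64-bit register assignment applies to c1 and tiered builds as well, instead of being buried under COMPILER2. A compile-time sketch of the resulting nesting (the flag names are the real ones; the messages are illustrative):

#include <cstdio>

int main() {
#ifdef _LP64
  puts("64-bit: a long takes a single O/I register, for c1 and c2 alike");
#else
#ifdef COMPILER2
  puts("32-bit c2: longs travel in the G1/G4 register pairs");
#else
  puts("32-bit c1: longs travel in I/O register pairs or stack slots");
#endif // COMPILER2
#endif // _LP64
  return 0;
}

Building the same file with combinations of -D_LP64 and -DCOMPILER2 walks the three branches.
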
@@ -699,17 +698,16 @@ Register AdapterGenerator::next_arg_slot(const int st_off){
 // Stores long into offset pointed to by base
 void AdapterGenerator::store_c2i_long(Register r, Register base,
                                       const int st_off, bool is_stack) {
-#ifdef COMPILER2
 #ifdef _LP64
   // In V9, longs are given 2 64-bit slots in the interpreter, but the
   // data is passed in only 1 slot.
   __ stx(r, base, next_arg_slot(st_off));
 #else
+#ifdef COMPILER2
   // Misaligned store of 64-bit data
   __ stw(r, base, arg_slot(st_off));    // lo bits
   __ srlx(r, 32, r);
   __ stw(r, base, next_arg_slot(st_off));  // hi bits
-#endif // _LP64
 #else
   if (is_stack) {
     // Misaligned store of 64-bit data
@@ -721,6 +719,7 @@ void AdapterGenerator::store_c2i_long(Register r, Register base,
     __ stw(r , base, next_arg_slot(st_off)); // hi bits
   }
 #endif // COMPILER2
+#endif // _LP64
   tag_c2i_arg(frame::TagCategory2, base, st_off, r);
 }
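
For the 32-bit path, the stw/srlx/stw sequence splits one 64-bit register across two 4-byte slots because the target slot is not 8-byte aligned. A plain C++ restatement of that split (offsets are illustrative; on big-endian SPARC the high word sits at the "next" slot):

#include <cstdint>
#include <cstdio>
#include <cstring>

static void store_long_split(uint8_t* base, int lo_off, int hi_off, uint64_t r) {
  uint32_t lo = (uint32_t)r;           // stw r, base, arg_slot(st_off)       -- lo bits
  r >>= 32;                            // srlx r, 32, r
  uint32_t hi = (uint32_t)r;           // stw r, base, next_arg_slot(st_off)  -- hi bits
  memcpy(base + lo_off, &lo, sizeof lo);
  memcpy(base + hi_off, &hi, sizeof hi);
}

int main() {
  uint8_t frame[16] = {0};
  store_long_split(frame, 12, 8, 0x1122334455667788ULL);
  for (int off = 8; off < 16; off += 4) {
    uint32_t w; memcpy(&w, frame + off, sizeof w);
    printf("slot %2d: 0x%08x\n", off, (unsigned)w);
  }
  return 0;
}
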
@@ -1637,7 +1636,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
     }
   } else if (dst.is_single_phys_reg()) {
     if (src.is_adjacent_aligned_on_stack(2)) {
-      __ ld_long(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
     } else {
       // dst is a single reg.
       // Remember lo is low address not msb for stack slots
@@ -1811,7 +1810,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
   // Native nmethod wrappers never take possession of the oop arguments.
   // So the caller will gc the arguments.  The only thing we need an
   // oopMap for is if the call is static
......
 /*
- * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,6 @@
  *
  */

-inline void Assembler::emit_long64(jlong x) {
-  *(jlong*) _code_pos = x;
-  _code_pos += sizeof(jlong);
-  code_section()->set_end(_code_pos);
-}
-
 inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
   unsigned char op = branch[0];
   assert(op == 0xE8 /* call */ ||
@@ -69,18 +63,25 @@ inline void MacroAssembler::pd_print_patched_instruction(address branch) {
 }
 #endif // ndef PRODUCT

-inline void MacroAssembler::movptr(Address dst, intptr_t src) {
-#ifdef _LP64
-  Assembler::mov64(dst, src);
-#else
-  Assembler::movl(dst, src);
-#endif // _LP64
-}
-
-inline void MacroAssembler::movptr(Register dst, intptr_t src) {
-#ifdef _LP64
-  Assembler::mov64(dst, src);
-#else
-  Assembler::movl(dst, src);
-#endif // _LP64
-}
+#ifndef _LP64
+inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
+inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; }
+
+inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; }
+inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }
+
+inline void Assembler::prefix(Register reg) {}
+inline void Assembler::prefix(Address adr) {}
+inline void Assembler::prefixq(Address adr) {}
+
+inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
+inline void Assembler::prefixq(Address adr, Register reg) {}
+
+inline void Assembler::prefix(Address adr, XMMRegister reg) {}
+#else
+inline void Assembler::emit_long64(jlong x) {
+  *(jlong*) _code_pos = x;
+  _code_pos += sizeof(jlong);
+  code_section()->set_end(_code_pos);
+}
+#endif // _LP64
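
The 32-bit stubs above can be empty or identity functions because a REX prefix only exists on x86_64: encodings 8-15 (r8-r15) need a prefix bit, and 32-bit code never sees them. A simplified sketch of the 64-bit side (assumptions: only REX.B is modeled; byteinst and REX.W are ignored):

#include <cstdio>

static int prefix_and_encode_64(int reg_enc, unsigned char* rex_out) {
  if (reg_enc >= 8) {
    *rex_out = 0x41;      // REX.B: extends the register field to r8-r15
    return reg_enc - 8;   // low three bits go into the opcode/modrm byte
  }
  *rex_out = 0;           // encodings 0-7 need no prefix, as on 32-bit
  return reg_enc;
}

int main() {
  unsigned char rex;
  for (int enc = 0; enc < 16; enc++) {
    int low = prefix_and_encode_64(enc, &rex);
    printf("enc %2d -> rex 0x%02x, low bits %d\n", enc, rex, low);
  }
  return 0;
}
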
This diff is collapsed.
/*
 * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  unsigned char op = branch[0];
  assert(op == 0xE8 /* call */ ||
         op == 0xE9 /* jmp */ ||
         op == 0xEB /* short jmp */ ||
         (op & 0xF0) == 0x70 /* short jcc */ ||
         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
         "Invalid opcode at patch point");

  if (op == 0xEB || (op & 0xF0) == 0x70) {
    // short offset operators (jmp and jcc)
    char* disp = (char*) &branch[1];
    int imm8 = target - (address) &disp[1];
    guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
    *disp = imm8;
  } else {
    int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
    int imm32 = target - (address) &disp[1];
    *disp = imm32;
  }
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  const char* s;
  unsigned char op = branch[0];
  if (op == 0xE8) {
    s = "call";
  } else if (op == 0xE9 || op == 0xEB) {
    s = "jmp";
  } else if ((op & 0xF0) == 0x70) {
    s = "jcc";
  } else if (op == 0x0F) {
    s = "jcc";
  } else {
    s = "????";
  }
  tty->print("%s (unresolved)", s);
}
#endif // ndef PRODUCT
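
pd_patch_instruction rewrites the displacement of an already-emitted branch once its target is known; the rel32 field is always relative to the address of the next instruction. A self-contained demonstration over a byte buffer (not HotSpot code) for the 5-byte 0xE9 jmp case:

#include <cstdio>
#include <cstring>

typedef unsigned char* address;

static void patch_jmp(address branch, address target) {
  // branch[0] is 0xE9; branch[1..4] hold the rel32 displacement
  int* disp = (int*)&branch[1];
  int imm32 = (int)(target - (address)&disp[1]);  // relative to the next insn
  memcpy(disp, &imm32, sizeof imm32);
}

int main() {
  unsigned char code[64] = {0};
  code[0] = 0xE9;                        // jmp rel32, target not yet resolved
  patch_jmp(code, code + 32);            // bind it to code + 32
  int d; memcpy(&d, code + 1, sizeof d);
  printf("rel32 = %d\n", d);             // 32 - 5 = 27
  return 0;
}
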
This diff is collapsed.
@@ -43,11 +43,12 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
     __ comisd(input()->as_xmm_double_reg(),
               ExternalAddress((address)&double_zero));
   } else {
-    __ pushl(rax);
+    LP64_ONLY(ShouldNotReachHere());
+    __ push(rax);
     __ ftst();
     __ fnstsw_ax();
     __ sahf();
-    __ popl(rax);
+    __ pop(rax);
   }

   Label NaN, do_return;
@@ -61,7 +62,7 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
   // input is NaN -> return 0
   __ bind(NaN);
-  __ xorl(result()->as_register(), result()->as_register());
+  __ xorptr(result()->as_register(), result()->as_register());
   __ bind(do_return);
   __ jmp(_continuation);
@@ -139,7 +140,7 @@ NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKl
 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
   assert(__ rsp_offset() == 0, "frame size should be fixed");
   __ bind(_entry);
-  __ movl(rdx, _klass_reg->as_register());
+  __ movptr(rdx, _klass_reg->as_register());
   __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -306,10 +307,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
     assert(_obj != noreg, "must be a valid register");
     Register tmp = rax;
     if (_obj == tmp) tmp = rbx;
-    __ pushl(tmp);
+    __ push(tmp);
     __ get_thread(tmp);
-    __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
-    __ popl(tmp);
+    __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
+    __ pop(tmp);
     __ jcc(Assembler::notEqual, call_patch);

     // access_field patches may execute the patched code before it's
@@ -434,7 +435,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
     VMReg r_1 = args[i].first();
     if (r_1->is_stack()) {
       int st_off = r_1->reg2stack() * wordSize;
-      __ movl (Address(rsp, st_off), r[i]);
+      __ movptr (Address(rsp, st_off), r[i]);
     } else {
       assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
     }
@@ -449,7 +450,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(info());

 #ifndef PRODUCT
-  __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
+  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
 #endif

   __ jmp(_continuation);
......
@@ -36,27 +36,34 @@ enum {
 // registers
 enum {
-  pd_nof_cpu_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_fpu_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_xmm_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_caller_save_cpu_regs_frame_map = 6,  // number of registers killed by calls
-  pd_nof_caller_save_fpu_regs_frame_map = 8,  // number of registers killed by calls
-  pd_nof_caller_save_xmm_regs_frame_map = 8,  // number of registers killed by calls
-  pd_nof_cpu_regs_reg_alloc = 6,  // number of registers that are visible to register allocator
+  pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers,       // number of registers used during code emission
+  pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers,  // number of registers used during code emission
+  pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers,    // number of registers used during code emission
+
+#ifdef _LP64
+  #define UNALLOCATED 4    // rsp, rbp, r15, r10
+#else
+  #define UNALLOCATED 2    // rsp, rbp
+#endif // LP64
+
+  pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED,  // number of registers killed by calls
+  pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map,  // number of registers killed by calls
+  pd_nof_caller_save_xmm_regs_frame_map = pd_nof_xmm_regs_frame_map,  // number of registers killed by calls
+
+  pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map,  // number of registers that are visible to register allocator
   pd_nof_fpu_regs_reg_alloc = 6,  // number of registers that are visible to register allocator

-  pd_nof_cpu_regs_linearscan = 8,  // number of registers visible to linear scan
-  pd_nof_fpu_regs_linearscan = 8,  // number of registers visible to linear scan
-  pd_nof_xmm_regs_linearscan = 8,  // number of registers visible to linear scan
+  pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map,  // number of registers visible to linear scan
+  pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map,  // number of registers visible to linear scan
+  pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map,  // number of registers visible to linear scan
+
   pd_first_cpu_reg = 0,
-  pd_last_cpu_reg = 5,
+  pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
   pd_first_byte_reg = 2,
   pd_last_byte_reg = 5,
   pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
   pd_last_fpu_reg =  pd_first_fpu_reg + 7,
   pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
-  pd_last_xmm_reg = pd_first_xmm_reg + 7
+  pd_last_xmm_reg = pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1
 };
......
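
A quick check that the symbolic definitions reproduce the old 32-bit constants and scale to 64-bit: 8 or 16 CPU registers minus the unallocatable ones (rsp/rbp, plus r10/r15 on 64-bit). The counts below are the values the diff implies, restated outside HotSpot:

#include <cstdio>

int main() {
  for (int lp64 = 0; lp64 <= 1; lp64++) {
    int nof_cpu_regs = lp64 ? 16 : 8;    // RegisterImpl::number_of_registers
    int unallocated  = lp64 ? 4 : 2;     // rsp, rbp (+ r15, r10 on 64-bit)
    int caller_save  = nof_cpu_regs - unallocated;
    int last_cpu_reg = caller_save - 1;  // NOT_LP64(5) LP64_ONLY(11)
    printf("%s: frame_map %2d, caller_save %2d, pd_last_cpu_reg %2d\n",
           lp64 ? "LP64  " : "32-bit", nof_cpu_regs, caller_save, last_cpu_reg);
  }
  return 0;
}
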
@@ -39,10 +39,15 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
     opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
   } else if (r_1->is_Register()) {
     Register reg = r_1->as_Register();
-    if (r_2->is_Register()) {
+    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
       Register reg2 = r_2->as_Register();
+#ifdef _LP64
+      assert(reg2 == reg, "must be same register");
+      opr = as_long_opr(reg);
+#else
       opr = as_long_opr(reg2, reg);
-    } else if (type == T_OBJECT) {
+#endif // _LP64
+    } else if (type == T_OBJECT || type == T_ARRAY) {
       opr = as_oop_opr(reg);
     } else {
       opr = as_opr(reg);
@@ -88,18 +93,39 @@ LIR_Opr FrameMap::rax_oop_opr;
 LIR_Opr FrameMap::rdx_oop_opr;
 LIR_Opr FrameMap::rcx_oop_opr;

-LIR_Opr FrameMap::rax_rdx_long_opr;
-LIR_Opr FrameMap::rbx_rcx_long_opr;
+LIR_Opr FrameMap::long0_opr;
+LIR_Opr FrameMap::long1_opr;
 LIR_Opr FrameMap::fpu0_float_opr;
 LIR_Opr FrameMap::fpu0_double_opr;
 LIR_Opr FrameMap::xmm0_float_opr;
 LIR_Opr FrameMap::xmm0_double_opr;

+#ifdef _LP64
+
+LIR_Opr FrameMap::r8_opr;
+LIR_Opr FrameMap::r9_opr;
+LIR_Opr FrameMap::r10_opr;
+LIR_Opr FrameMap::r11_opr;
+LIR_Opr FrameMap::r12_opr;
+LIR_Opr FrameMap::r13_opr;
+LIR_Opr FrameMap::r14_opr;
+LIR_Opr FrameMap::r15_opr;
+
+// r10 and r15 can never contain oops since they aren't available to
+// the allocator
+LIR_Opr FrameMap::r8_oop_opr;
+LIR_Opr FrameMap::r9_oop_opr;
+LIR_Opr FrameMap::r11_oop_opr;
+LIR_Opr FrameMap::r12_oop_opr;
+LIR_Opr FrameMap::r13_oop_opr;
+LIR_Opr FrameMap::r14_oop_opr;
+
+#endif // _LP64
+
 LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
 LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
 LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, };

-XMMRegister FrameMap::_xmm_regs [8] = { 0, };
+XMMRegister FrameMap::_xmm_regs [] = { 0, };

 XMMRegister FrameMap::nr2xmmreg(int rnr) {
   assert(_init_done, "tables not initialized");
@@ -113,18 +139,39 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
 void FrameMap::init() {
   if (_init_done) return;

-  assert(nof_cpu_regs == 8, "wrong number of CPU registers");
-  map_register(0, rsi);  rsi_opr = LIR_OprFact::single_cpu(0);  rsi_oop_opr = LIR_OprFact::single_cpu_oop(0);
-  map_register(1, rdi);  rdi_opr = LIR_OprFact::single_cpu(1);  rdi_oop_opr = LIR_OprFact::single_cpu_oop(1);
-  map_register(2, rbx);  rbx_opr = LIR_OprFact::single_cpu(2);  rbx_oop_opr = LIR_OprFact::single_cpu_oop(2);
-  map_register(3, rax);  rax_opr = LIR_OprFact::single_cpu(3);  rax_oop_opr = LIR_OprFact::single_cpu_oop(3);
-  map_register(4, rdx);  rdx_opr = LIR_OprFact::single_cpu(4);  rdx_oop_opr = LIR_OprFact::single_cpu_oop(4);
-  map_register(5, rcx);  rcx_opr = LIR_OprFact::single_cpu(5);  rcx_oop_opr = LIR_OprFact::single_cpu_oop(5);
-  map_register(6, rsp);  rsp_opr = LIR_OprFact::single_cpu(6);
-  map_register(7, rbp);  rbp_opr = LIR_OprFact::single_cpu(7);
-
-  rax_rdx_long_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
-  rbx_rcx_long_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
+  map_register(0, rsi);  rsi_opr = LIR_OprFact::single_cpu(0);
+  map_register(1, rdi);  rdi_opr = LIR_OprFact::single_cpu(1);
+  map_register(2, rbx);  rbx_opr = LIR_OprFact::single_cpu(2);
+  map_register(3, rax);  rax_opr = LIR_OprFact::single_cpu(3);
+  map_register(4, rdx);  rdx_opr = LIR_OprFact::single_cpu(4);
+  map_register(5, rcx);  rcx_opr = LIR_OprFact::single_cpu(5);
+
+#ifndef _LP64
+  // The unallocatable registers are at the end
+  map_register(6, rsp);
+  map_register(7, rbp);
+#else
+  map_register( 6, r8);   r8_opr = LIR_OprFact::single_cpu(6);
+  map_register( 7, r9);   r9_opr = LIR_OprFact::single_cpu(7);
+  map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
+  map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
+  map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
+  map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
+  // The unallocatable registers are at the end
+  map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
+  map_register(13, r15);  r15_opr = LIR_OprFact::single_cpu(13);
+  map_register(14, rsp);
+  map_register(15, rbp);
+#endif // _LP64
+
+#ifdef _LP64
+  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
+  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
+#else
+  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
+  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+#endif // _LP64
   fpu0_float_opr   = LIR_OprFact::single_fpu(0);
   fpu0_double_opr  = LIR_OprFact::double_fpu(0);
   xmm0_float_opr   = LIR_OprFact::single_xmm(0);
@@ -137,6 +184,15 @@ void FrameMap::init() {
   _caller_save_cpu_regs[4] = rdx_opr;
   _caller_save_cpu_regs[5] = rcx_opr;

+#ifdef _LP64
+  _caller_save_cpu_regs[6]  = r8_opr;
+  _caller_save_cpu_regs[7]  = r9_opr;
+  _caller_save_cpu_regs[8]  = r11_opr;
+  _caller_save_cpu_regs[9]  = r12_opr;
+  _caller_save_cpu_regs[10] = r13_opr;
+  _caller_save_cpu_regs[11] = r14_opr;
+#endif // _LP64
+
   _xmm_regs[0] = xmm0;
   _xmm_regs[1] = xmm1;
@@ -147,18 +203,51 @@ void FrameMap::init() {
   _xmm_regs[6] = xmm6;
   _xmm_regs[7] = xmm7;

+#ifdef _LP64
+  _xmm_regs[8]  = xmm8;
+  _xmm_regs[9]  = xmm9;
+  _xmm_regs[10] = xmm10;
+  _xmm_regs[11] = xmm11;
+  _xmm_regs[12] = xmm12;
+  _xmm_regs[13] = xmm13;
+  _xmm_regs[14] = xmm14;
+  _xmm_regs[15] = xmm15;
+#endif // _LP64
+
   for (int i = 0; i < 8; i++) {
     _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
+  }
+
+  for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
     _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
   }

   _init_done = true;

+  rsi_oop_opr = as_oop_opr(rsi);
+  rdi_oop_opr = as_oop_opr(rdi);
+  rbx_oop_opr = as_oop_opr(rbx);
+  rax_oop_opr = as_oop_opr(rax);
+  rdx_oop_opr = as_oop_opr(rdx);
+  rcx_oop_opr = as_oop_opr(rcx);
+
+  rsp_opr = as_pointer_opr(rsp);
+  rbp_opr = as_pointer_opr(rbp);
+
+#ifdef _LP64
+  r8_oop_opr = as_oop_opr(r8);
+  r9_oop_opr = as_oop_opr(r9);
+  r11_oop_opr = as_oop_opr(r11);
+  r12_oop_opr = as_oop_opr(r12);
+  r13_oop_opr = as_oop_opr(r13);
+  r14_oop_opr = as_oop_opr(r14);
+#endif // _LP64
+
   VMRegPair regs;
   BasicType sig_bt = T_OBJECT;
   SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
   receiver_opr = as_oop_opr(regs.first()->as_Register());
-  assert(receiver_opr == rcx_oop_opr, "rcvr ought to be rcx");
 }
......
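
Note how long0_opr and long1_opr keep the double_cpu shape on 64-bit but name the same register twice, so code that walks hi/lo halves keeps working while the value really lives in one register. A simplified restatement of the convention (LIR_OprFact actually packs these into tagged operand descriptors):

#include <cstdio>

struct DoubleCpu { int lo_rnr; int hi_rnr; };

static DoubleCpu double_cpu(int lo, int hi) {
  DoubleCpu d = { lo, hi };
  return d;
}

int main() {
  bool lp64 = sizeof(void*) == 8;
  DoubleCpu long0 = lp64 ? double_cpu(3, 3)    // rax, rax: one real register
                         : double_cpu(3, 4);   // eax lo, edx hi: a true pair
  printf("long0: lo rnr %d, hi rnr %d\n", long0.lo_rnr, long0.hi_rnr);
  return 0;
}
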
@@ -38,8 +38,13 @@
     nof_xmm_regs = pd_nof_xmm_regs_frame_map,
     nof_caller_save_xmm_regs = pd_nof_caller_save_xmm_regs_frame_map,
     first_available_sp_in_frame = 0,
+#ifndef _LP64
     frame_pad_in_bytes = 8,
     nof_reg_args = 2
+#else
+    frame_pad_in_bytes = 16,
+    nof_reg_args = 6
+#endif // _LP64
   };

  private:
@@ -65,17 +70,49 @@
   static LIR_Opr rax_oop_opr;
   static LIR_Opr rdx_oop_opr;
   static LIR_Opr rcx_oop_opr;
+#ifdef _LP64

-  static LIR_Opr rax_rdx_long_opr;
-  static LIR_Opr rbx_rcx_long_opr;
+  static LIR_Opr r8_opr;
+  static LIR_Opr r9_opr;
+  static LIR_Opr r10_opr;
+  static LIR_Opr r11_opr;
+  static LIR_Opr r12_opr;
+  static LIR_Opr r13_opr;
+  static LIR_Opr r14_opr;
+  static LIR_Opr r15_opr;
+
+  static LIR_Opr r8_oop_opr;
+  static LIR_Opr r9_oop_opr;
+  static LIR_Opr r11_oop_opr;
+  static LIR_Opr r12_oop_opr;
+  static LIR_Opr r13_oop_opr;
+  static LIR_Opr r14_oop_opr;
+
+#endif // _LP64
+
+  static LIR_Opr long0_opr;
+  static LIR_Opr long1_opr;
   static LIR_Opr fpu0_float_opr;
   static LIR_Opr fpu0_double_opr;
   static LIR_Opr xmm0_float_opr;
   static LIR_Opr xmm0_double_opr;

+#ifdef _LP64
+  static LIR_Opr as_long_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+  static LIR_Opr as_pointer_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+#else
   static LIR_Opr as_long_opr(Register r, Register r2) {
     return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
   }
+  static LIR_Opr as_pointer_opr(Register r) {
+    return LIR_OprFact::single_cpu(cpu_reg2rnr(r));
+  }
+#endif // _LP64

   // VMReg name for spilled physical FPU stack slot n
   static VMReg fpu_regname (int n);
......
@@ -36,13 +36,20 @@
   address float_constant(float f);
   address double_constant(double d);

+  bool is_literal_address(LIR_Address* addr);
+
+  // When we need to use something other than rscratch1 use this
+  // method.
+  Address as_Address(LIR_Address* addr, Register tmp);
+
  public:

   void store_parameter(Register r, int offset_from_esp_in_words);
   void store_parameter(jint c,     int offset_from_esp_in_words);
   void store_parameter(jobject c,  int offset_from_esp_in_words);

-  enum { call_stub_size = 15,
+  enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
          exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
-         deopt_handler_size = 10
+         deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
        };
@@ -77,7 +77,7 @@ LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
   switch (type->tag()) {
     case intTag:     opr = FrameMap::rax_opr;          break;
     case objectTag:  opr = FrameMap::rax_oop_opr;      break;
-    case longTag:    opr = FrameMap::rax_rdx_long_opr; break;
+    case longTag:    opr = FrameMap::long0_opr;        break;
     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;   break;
     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
@@ -117,12 +117,14 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {

 bool LIRGenerator::can_inline_as_constant(Value v) const {
+  if (v->type()->tag() == longTag) return false;
   return v->type()->tag() != objectTag ||
     (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
 }

 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
+  if (c->type() == T_LONG) return false;
   return c->type() != T_OBJECT || c->as_jobject() == NULL;
 }
@@ -155,6 +157,13 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
     addr = new LIR_Address(array_opr,
                            offset_in_bytes + index_opr->as_jint() * elem_size, type);
   } else {
+#ifdef _LP64
+    if (index_opr->type() == T_INT) {
+      LIR_Opr tmp = new_register(T_LONG);
+      __ convert(Bytecodes::_i2l, index_opr, tmp);
+      index_opr = tmp;
+    }
+#endif // _LP64
     addr =  new LIR_Address(array_opr,
                             index_opr,
                             LIR_Address::scale(type),
@@ -164,7 +173,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
     // This store will need a precise card mark, so go ahead and
     // compute the full address instead of computing once for the
     // store and again for the card mark.
-    LIR_Opr tmp = new_register(T_INT);
+    LIR_Opr tmp = new_pointer_register();
     __ leal(LIR_OprFact::address(addr), tmp);
     return new LIR_Address(tmp, 0, type);
   } else {
@@ -174,9 +183,8 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o

 void LIRGenerator::increment_counter(address counter, int step) {
-  LIR_Opr temp = new_register(T_INT);
-  LIR_Opr pointer = new_register(T_INT);
-  __ move(LIR_OprFact::intConst((int)counter), pointer);
+  LIR_Opr pointer = new_pointer_register();
+  __ move(LIR_OprFact::intptrConst(counter), pointer);
   LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
   increment_counter(addr, step);
 }
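
The increment_counter change is a correctness fix, not a cleanup: casting a 64-bit counter address through (int) truncates it, so the pointer must be materialized with intptrConst in a pointer-sized register. A standalone demonstration with an assumed 64-bit address:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t counter_addr = 0x00007f1234567890ULL;  // assumed 64-bit VM address
  int32_t  as_int32 = (int32_t)counter_addr;      // old path: intConst((int)counter)
  uint64_t as_ptr   = counter_addr;               // new path: intptrConst(counter)
  printf("as int32:  0x%08x (high bits lost)\n", (unsigned)as_int32);
  printf("as intptr: 0x%016llx (intact)\n", (unsigned long long)as_ptr);
  return 0;
}
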
@@ -481,7 +489,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
   left.load_item();
   right.load_item();

-  LIR_Opr reg = FrameMap::rax_rdx_long_opr;
+  LIR_Opr reg = FrameMap::long0_opr;
   arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
   LIR_Opr result = rlock_result(x);
   __ move(reg, result);
@@ -690,10 +698,10 @@ void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
   LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value

   // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
-  cmp_value.load_item_force(FrameMap::rax_rdx_long_opr);
+  cmp_value.load_item_force(FrameMap::long0_opr);

   // new value must be in rcx,ebx (hi,lo)
-  new_value.load_item_force(FrameMap::rbx_rcx_long_opr);
+  new_value.load_item_force(FrameMap::long1_opr);

   // object pointer register is overwritten with field address
   obj.load_item();
@@ -720,7 +728,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   LIRItem val (x->argument_at(3), this);     // replace field with val if matches cmp

   assert(obj.type()->tag() == objectTag, "invalid type");
-  assert(offset.type()->tag() == intTag, "invalid type");
+
+  // In 64bit the type can be long, sparc doesn't have this assert
+  // assert(offset.type()->tag() == intTag, "invalid type");
+
   assert(cmp.type()->tag() == type->tag(), "invalid type");
   assert(val.type()->tag() == type->tag(), "invalid type");
@@ -735,8 +746,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
     cmp.load_item_force(FrameMap::rax_opr);
     val.load_item();
   } else if (type == longType) {
-    cmp.load_item_force(FrameMap::rax_rdx_long_opr);
-    val.load_item_force(FrameMap::rbx_rcx_long_opr);
+    cmp.load_item_force(FrameMap::long0_opr);
+    val.load_item_force(FrameMap::long1_opr);
   } else {
     ShouldNotReachHere();
   }
@@ -833,12 +844,33 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   // operands for arraycopy must use fixed registers, otherwise
   // LinearScan will fail allocation (because arraycopy always needs a
   // call)
+
+#ifndef _LP64
   src.load_item_force     (FrameMap::rcx_oop_opr);
   src_pos.load_item_force (FrameMap::rdx_opr);
   dst.load_item_force     (FrameMap::rax_oop_opr);
   dst_pos.load_item_force (FrameMap::rbx_opr);
   length.load_item_force  (FrameMap::rdi_opr);
   LIR_Opr tmp =           (FrameMap::rsi_opr);
+#else
+
+  // The java calling convention will give us enough registers
+  // so that on the stub side the args will be perfect already.
+  // On the other slow/special case side we call C and the arg
+  // positions are not similar enough to pick one as the best.
+  // Also because the java calling convention is a "shifted" version
+  // of the C convention we can process the java args trivially into C
+  // args without worry of overwriting during the xfer
+
+  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
+  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
+  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
+  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
+  length.load_item_force  (FrameMap::as_opr(j_rarg4));
+  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
+#endif // LP64
+
   set_no_result(x);

   int flags;
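
The "shifted" remark refers to the HotSpot x86_64 convention where each Java argument register is the next C argument register; treat the exact mapping below as an assumption for illustration. Copying arg i from j_rarg[i] to c_rarg[i] in increasing order reads every register one step before it is overwritten, so no temporary is needed:

#include <cstdio>

int main() {
  const char* c_rarg[6] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
  const char* j_rarg[6] = { "rsi", "rdx", "rcx", "r8", "r9", "rdi" };
  // chain: rdi <- rsi <- rdx <- rcx <- r8 <- r9, processed head first
  for (int i = 0; i < 5; i++)
    printf("arg %d: mov %-3s <- %-3s\n", i, c_rarg[i], j_rarg[i]);
  return 0;
}
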
@@ -857,7 +889,7 @@ LIR_Opr fixed_register_for(BasicType type) {
     case T_FLOAT:  return FrameMap::fpu0_float_opr;
     case T_DOUBLE: return FrameMap::fpu0_double_opr;
     case T_INT:    return FrameMap::rax_opr;
-    case T_LONG:   return FrameMap::rax_rdx_long_opr;
+    case T_LONG:   return FrameMap::long0_opr;
     default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
   }
 }
@@ -1161,9 +1193,13 @@ void LIRGenerator::do_If(If* x) {

 LIR_Opr LIRGenerator::getThreadPointer() {
+#ifdef _LP64
+  return FrameMap::as_pointer_opr(r15_thread);
+#else
   LIR_Opr result = new_register(T_INT);
   __ get_thread(result);
   return result;
+#endif // _LP64
 }

 void LIRGenerator::trace_block_entry(BlockBegin* block) {
......
@@ -23,18 +23,29 @@
  */

 inline bool LinearScan::is_processed_reg_num(int reg_num) {
+#ifndef _LP64
   // rsp and rbp (numbers 6 and 7) are ignored
   assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
   assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
   assert(reg_num >= 0, "invalid reg_num");

   return reg_num < 6 || reg_num > 7;
+#else
+  // rsp and rbp, r10 and r15 (numbers 12 through 15) are ignored
+  assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
+  assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
+  assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
+  assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
+  assert(reg_num >= 0, "invalid reg_num");
+
+  return reg_num < 12 || reg_num > 15;
+#endif // _LP64
 }

 inline int LinearScan::num_physical_regs(BasicType type) {
   // Intel requires two cpu registers for long,
   // but requires only one fpu register for double
-  if (type == T_LONG) {
+  if (LP64_ONLY(false &&) type == T_LONG) {
     return 2;
   }
   return 1;
......
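
The predicate restated outside HotSpot: linear scan simply skips the register numbers that FrameMap::init() parked at the end of the numbering. The numbers follow the mapping established in the FrameMap diff above:

#include <cstdio>

static bool is_processed_reg_num(int reg_num, bool lp64) {
  if (!lp64) return reg_num < 6 || reg_num > 7;   // skip rsp(6), rbp(7)
  return reg_num < 12 || reg_num > 15;            // skip r10(12), r15(13), rsp(14), rbp(15)
}

int main() {
  for (int r = 0; r < 16; r++)
    printf("rnr %2d: 32-bit %d, 64-bit %d\n", r,
           (int)is_processed_reg_num(r, false), (int)is_processed_reg_num(r, true));
  return 0;
}
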
@@ -26,18 +26,17 @@
 #include "incls/_c1_MacroAssembler_x86.cpp.incl"

 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;
   int null_check_offset = -1;
   verify_oop(obj);

   // save object being locked into the BasicObjectLock
-  movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
+  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

   if (UseBiasedLocking) {
     assert(scratch != noreg, "should have scratch register at this point");
@@ -47,16 +46,16 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   }

   // Load object header
-  movl(hdr, Address(obj, hdr_offset));
+  movptr(hdr, Address(obj, hdr_offset));
   // and mark it as unlocked
-  orl(hdr, markOopDesc::unlocked_value);
+  orptr(hdr, markOopDesc::unlocked_value);
   // save unlocked object header into the displaced header location on the stack
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // test if object header is still the same (i.e. unlocked), and if so, store the
   // displaced header address in the object header - if it is not the same, get the
   // object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(disp_hdr, Address(obj, hdr_offset));
+  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
   // if the object header was the same, we're done
   if (PrintBiasedLockingStatistics) {
     cond_inc32(Assembler::equal,
@@ -76,11 +75,11 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   //
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
-  subl(hdr, rsp);
-  andl(hdr, aligned_mask - os::vm_page_size());
+  subptr(hdr, rsp);
+  andptr(hdr, aligned_mask - os::vm_page_size());
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // otherwise we don't care about the result and handle locking via runtime call
   jcc(Assembler::notZero, slow_case);
   // done
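
The arithmetic behind the recursive-locking test, restated with assumed values: after the cmpxchg fails, hdr holds the current mark word; if we already own the lock, that word points into our own stack, so subtracting rsp and masking away the in-page bits yields zero. The widened aligned_mask is exactly why the old assert(BytesPerWord == 4) could go:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t page_size    = 4096;          // assumed os::vm_page_size()
  const int64_t aligned_mask = 8 - 1;         // BytesPerWord - 1 on 64-bit

  int64_t rsp           = 0x7ffc00000000LL;   // assumed stack pointer
  int64_t own_stack_hdr = rsp + 256;          // displaced header on our stack
  int64_t foreign_hdr   = 0x7f1234560000LL;   // mark word pointing elsewhere

  int64_t mask = aligned_mask - page_size;    // keeps align bits and bits >= page
  printf("recursive: %#llx\n", (unsigned long long)((own_stack_hdr - rsp) & mask));
  printf("contended: %#llx\n", (unsigned long long)((foreign_hdr - rsp) & mask));
  return 0;
}
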
@@ -90,35 +89,34 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr

 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;

   if (UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
     biased_locking_exit(obj, hdr, done);
   }

   // load displaced header
-  movl(hdr, Address(disp_hdr, 0));
+  movptr(hdr, Address(disp_hdr, 0));
   // if the loaded hdr is NULL we had recursive locking
-  testl(hdr, hdr);
+  testptr(hdr, hdr);
   // if we had recursive locking, we are done
   jcc(Assembler::zero, done);
   if (!UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
   }
   verify_oop(obj);
   // test if object header is pointing to the displaced header, and if so, restore
   // the displaced header in the object - if the object header is not pointing to
   // the displaced header, get the object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(hdr, Address(obj, hdr_offset));
+  cmpxchgptr(hdr, Address(obj, hdr_offset));
   // if the object header was not pointing to the displaced header,
   // we do unlocking via runtime call
   jcc(Assembler::notEqual, slow_case);
@@ -141,13 +139,14 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
   assert_different_registers(obj, klass, len);
   if (UseBiasedLocking && !len->is_valid()) {
     assert_different_registers(obj, klass, len, t1, t2);
-    movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
-    movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+    movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
   } else {
-    movl(Address(obj, oopDesc::mark_offset_in_bytes ()), (int)markOopDesc::prototype());
+    // This assumes that all prototype bits fit in an int32_t
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }

-  movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
+  movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
   if (len->is_valid()) {
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
@@ -160,25 +159,27 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
   assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
   assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
   Register index = len_in_bytes;
-  subl(index, hdr_size_in_bytes);
+  // index is positive and ptr sized
+  subptr(index, hdr_size_in_bytes);
   jcc(Assembler::zero, done);
   // initialize topmost word, divide index by 2, check if odd and test if zero
   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 #ifdef ASSERT
   { Label L;
-    testl(index, BytesPerWord - 1);
+    testptr(index, BytesPerWord - 1);
     jcc(Assembler::zero, L);
     stop("index is not a multiple of BytesPerWord");
     bind(L);
   }
 #endif
-  xorl(t1, t1);    // use _zero reg to clear memory (shorter code)
+  xorptr(t1, t1);  // use _zero reg to clear memory (shorter code)
   if (UseIncDec) {
-    shrl(index, 3);  // divide by 8 and set carry flag if bit 2 was set
+    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
   } else {
-    shrl(index, 2);  // use 2 instructions to avoid partial flag stall
-    shrl(index, 1);
+    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
+    shrptr(index, 1);
   }
+#ifndef _LP64
   // index could have been not a multiple of 8 (i.e., bit 2 was set)
   { Label even;
     // note: if index was a multiple of 8, than it cannot
@@ -186,16 +187,17 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
     // => if it is even, we don't need to check for 0 again
     jcc(Assembler::carryClear, even);
     // clear topmost word (no jump needed if conditional assignment would work here)
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
     // index could be 0 now, need to check again
     jcc(Assembler::zero, done);
     bind(even);
   }
+#endif // !_LP64
   // initialize remaining object fields: rdx is a multiple of 2 now
   { Label loop;
     bind(loop);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
+    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
     decrement(index);
     jcc(Assembler::notZero, loop);
   }
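
A plain C++ restatement of the clearing loop above (simplified: the odd-word carry check is skipped, so len_in_bytes - hdr_size must be a multiple of 8 here). On 64-bit, one 8-byte store per iteration replaces the 32-bit pair of 4-byte stores at the same times_8 stride:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void clear_body(unsigned char* obj, long len_in_bytes, long hdr_size) {
  long index = len_in_bytes - hdr_size;   // subptr(index, hdr_size_in_bytes)
  if (index == 0) return;                 // jcc(Assembler::zero, done)
  index >>= 3;                            // shrptr(index, 3): 8-byte strides
  const int64_t zero = 0;
  while (index != 0) {                    // { Label loop; bind(loop); ... }
    memcpy(obj + index * 8 + hdr_size - 8, &zero, sizeof zero);
    index--;                              // decrement(index); jcc(notZero, loop)
  }
}

int main() {
  unsigned char o[64];
  memset(o, 0xAB, sizeof o);
  clear_body(o, sizeof o, 16);
  printf("header byte 0x%02x, first body byte 0x%02x\n", o[0], o[16]);
  return 0;
}
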
...@@ -227,30 +229,30 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register ...@@ -227,30 +229,30 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
const Register index = t2; const Register index = t2;
const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below) const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
if (var_size_in_bytes != noreg) { if (var_size_in_bytes != noreg) {
movl(index, var_size_in_bytes); mov(index, var_size_in_bytes);
initialize_body(obj, index, hdr_size_in_bytes, t1_zero); initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
} else if (con_size_in_bytes <= threshold) { } else if (con_size_in_bytes <= threshold) {
// use explicit null stores // use explicit null stores
// code size = 2 + 3*n bytes (n = number of fields to clear) // code size = 2 + 3*n bytes (n = number of fields to clear)
xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord) for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
movl(Address(obj, i), t1_zero); movptr(Address(obj, i), t1_zero);
} else if (con_size_in_bytes > hdr_size_in_bytes) { } else if (con_size_in_bytes > hdr_size_in_bytes) {
// use loop to null out the fields // use loop to null out the fields
// code size = 16 bytes for even n (n = number of fields to clear) // code size = 16 bytes for even n (n = number of fields to clear)
// initialize last object field first if odd number of fields // initialize last object field first if odd number of fields
xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3); movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
// initialize last object field if constant size is odd // initialize last object field if constant size is odd
if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0) if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero); movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
// initialize remaining object fields: rdx is a multiple of 2 // initialize remaining object fields: rdx is a multiple of 2
{ Label loop; { Label loop;
bind(loop); bind(loop);
movl(Address(obj, index, Address::times_8, movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
hdr_size_in_bytes - (1*BytesPerWord)), t1_zero); t1_zero);
movl(Address(obj, index, Address::times_8, NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
hdr_size_in_bytes - (2*BytesPerWord)), t1_zero); t1_zero);)
decrement(index); decrement(index);
jcc(Assembler::notZero, loop); jcc(Assembler::notZero, loop);
} }
@@ -269,17 +271,17 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
   assert_different_registers(obj, len, t1, t2, klass);

   // determine alignment mask
-  assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work");
+  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

   // check for negative or excessive length
-  cmpl(len, max_array_allocation_length);
+  cmpptr(len, (int32_t)max_array_allocation_length);
   jcc(Assembler::above, slow_case);

   const Register arr_size = t2; // okay to be the same
   // align object end
-  movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
-  leal(arr_size, Address(arr_size, len, f));
-  andl(arr_size, ~MinObjAlignmentInBytesMask);
+  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+  lea(arr_size, Address(arr_size, len, f));
+  andptr(arr_size, ~MinObjAlignmentInBytesMask);

   try_allocate(obj, arr_size, 0, t1, t2, slow_case);
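The three instructions that compute arr_size are a standard align-up: add the alignment mask, then clear the low bits. A hedged C++ sketch of the same arithmetic (kMinObjAlignment stands in for the VM's MinObjAlignmentInBytes and is assumed to be a power of two, which is what makes the masking valid):

#include <cstdint>

static const std::uintptr_t kMinObjAlignment = 8;          // assumed power of two
static const std::uintptr_t kMask = kMinObjAlignment - 1;  // ~ MinObjAlignmentInBytesMask

// arr_size = align_up(header_bytes + (len << log2_elem_size))
std::uintptr_t array_size_in_bytes(std::uintptr_t header_bytes,
                                   std::uintptr_t len, int log2_elem_size) {
  std::uintptr_t size = header_bytes + kMask + (len << log2_elem_size);  // movptr + lea
  return size & ~kMask;                                                  // andptr
}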
@@ -305,12 +307,13 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
   // check against inline cache
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
-  cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+  cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
   // if icache check fails, then jump to runtime routine
   // Note: RECEIVER must still contain the receiver!
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-  assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
+  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
+  assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
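The assert now expects 10 bytes on 64-bit because the register-vs-memory cmp picks up a REX prefix there. LP64_ONLY and NOT_LP64 are HotSpot's build-selection macros; a simplified sketch of how they behave:

// Simplified sketch of HotSpot's LP64 selection macros: each expands to its
// argument on the matching build and to nothing on the other, so one source
// line can carry both the 32-bit and the 64-bit variant.
#ifdef _LP64
#define LP64_ONLY(code) code
#define NOT_LP64(code)
#else
#define LP64_ONLY(code)
#define NOT_LP64(code) code
#endif

// Usage, as in the hunk above: 10 bytes with the REX prefix, 9 without.
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);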
@@ -364,7 +367,7 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
 void C1_MacroAssembler::verify_not_null_oop(Register r) {
   if (!VerifyOops) return;
   Label not_null;
-  testl(r, r);
+  testptr(r, r);
   jcc(Assembler::notZero, not_null);
   stop("non-null oop required");
   bind(not_null);
@@ -373,12 +376,12 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) {
 void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
 #ifdef ASSERT
-  if (inv_rax) movl(rax, 0xDEAD);
-  if (inv_rbx) movl(rbx, 0xDEAD);
-  if (inv_rcx) movl(rcx, 0xDEAD);
-  if (inv_rdx) movl(rdx, 0xDEAD);
-  if (inv_rsi) movl(rsi, 0xDEAD);
-  if (inv_rdi) movl(rdi, 0xDEAD);
+  if (inv_rax) movptr(rax, 0xDEAD);
+  if (inv_rbx) movptr(rbx, 0xDEAD);
+  if (inv_rcx) movptr(rcx, 0xDEAD);
+  if (inv_rdx) movptr(rdx, 0xDEAD);
+  if (inv_rsi) movptr(rsi, 0xDEAD);
+  if (inv_rdi) movptr(rdi, 0xDEAD);
 #endif
 }
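invalidate_registers is debug-only poisoning: registers whose contents should be dead get stamped with a recognizable bad value so stale uses fail loudly. An illustrative C++ analogue (the slot array stands in for registers; ASSERT mirrors the hunk's debug-build guard):

#include <cstdint>

static const std::uintptr_t kPoison = 0xDEAD;  // recognizable, never a valid pointer here

// Analogue of invalidate_registers: stamp dead slots so accidental reuse
// shows up as an obviously bogus value in debug builds.
void invalidate_slots(std::uintptr_t* slots, int n, const bool* invalidate) {
#ifdef ASSERT
  for (int i = 0; i < n; i++) {
    if (invalidate[i]) slots[i] = kPoison;   // like movptr(reg, 0xDEAD)
  }
#endif
}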
...
@@ -94,16 +94,17 @@
   // Note: NEVER push values directly, but only through following push_xxx functions;
   //       This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset)

-  void push_jint (jint i)     { _rsp_offset++; pushl(i); }
+  void push_jint (jint i)     { _rsp_offset++; push(i); }
   void push_oop  (jobject o)  { _rsp_offset++; pushoop(o); }
-  void push_addr (Address a)  { _rsp_offset++; pushl(a); }
-  void push_reg  (Register r) { _rsp_offset++; pushl(r); }
-  void pop       (Register r) { _rsp_offset--; popl (r); assert(_rsp_offset >= 0, "stack offset underflow"); }
+  // Seems to always be in wordSize
+  void push_addr (Address a)  { _rsp_offset++; pushptr(a); }
+  void push_reg  (Register r) { _rsp_offset++; push(r); }
+  void pop_reg   (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); }

   void dec_stack (int nof_words) {
     _rsp_offset -= nof_words;
     assert(_rsp_offset >= 0, "stack offset underflow");
-    addl(rsp, wordSize * nof_words);
+    addptr(rsp, wordSize * nof_words);
   }

   void dec_stack_after_call (int nof_words) {
...
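The push_/pop_ wrappers in the previous hunk exist so that every stack adjustment is mirrored in _rsp_offset, keeping the entry-relative stack depth statically known. A small sketch of the same bookkeeping pattern, detached from the assembler (instruction emission elided as comments; names are illustrative):

#include <cassert>

class StackTracker {
  int _rsp_offset;                 // words pushed since entry
 public:
  StackTracker() : _rsp_offset(0) {}
  void push_word()           { _rsp_offset++;         /* emit push here */ }
  void pop_word()            { _rsp_offset--;         /* emit pop here  */
                               assert(_rsp_offset >= 0 && "stack offset underflow"); }
  void dec_stack(int nwords) { _rsp_offset -= nwords; /* emit addptr(rsp, ...) */
                               assert(_rsp_offset >= 0 && "stack offset underflow"); }
  int  offset() const        { return _rsp_offset; }
};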
@@ -98,24 +98,24 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
   // table.
 #ifdef WIN32
-  __ pushl(rcx);                    // save "this"
+  __ push(rcx);                     // save "this"
 #endif
-  __ movl(rcx, rax);
-  __ shrl(rcx, 8);                  // isolate vtable identifier.
-  __ shll(rcx, LogBytesPerWord);
+  __ mov(rcx, rax);
+  __ shrptr(rcx, 8);                // isolate vtable identifier.
+  __ shlptr(rcx, LogBytesPerWord);
   Address index(noreg, rcx, Address::times_1);
   ExternalAddress vtbl((address)vtbl_list);
   __ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address.
 #ifdef WIN32
-  __ popl(rcx);                     // restore "this"
+  __ pop(rcx);                      // restore "this"
 #else
-  __ movl(rcx, Address(rsp, 4));    // fetch "this"
+  __ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this"
 #endif
-  __ movl(Address(rcx, 0), rdx);    // update vtable pointer.
-  __ andl(rax, 0x00ff);             // isolate vtable method index
-  __ shll(rax, LogBytesPerWord);
-  __ addl(rax, rdx);                // address of real method pointer.
+  __ movptr(Address(rcx, 0), rdx);  // update vtable pointer.
+  __ andptr(rax, 0x00ff);           // isolate vtable method index
+  __ shlptr(rax, LogBytesPerWord);
+  __ addptr(rax, rdx);              // address of real method pointer.
   __ jmp(Address(rax, 0));          // get real method pointer.
   __ flush();
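In C terms, the generated stub decodes rax as (vtable_list_index << 8) | method_index, patches the receiver's vtable pointer to the real vtable, and tail-jumps to the real method slot. A hedged sketch of that logic (types and names are illustrative, not the VM's):

#include <cstdint>

typedef void (*method_fn)();

method_fn resolve_and_patch(void** vtbl_list, void** receiver, std::uintptr_t rax) {
  std::uintptr_t vtable_id = rax >> 8;        // shrptr(rcx, 8); shlptr does the scaling
  void* real_vtable = vtbl_list[vtable_id];   // movptr(rdx, ArrayAddress(vtbl, index))
  *receiver = real_vtable;                    // movptr(Address(rcx, 0), rdx)
  std::uintptr_t method_index = rax & 0xff;   // andptr(rax, 0x00ff)
  void** vtable = static_cast<void**>(real_vtable);
  return reinterpret_cast<method_fn>(vtable[method_index]);  // jmp(Address(rax, 0))
}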
...
@@ -217,7 +217,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
 void frame::patch_pc(Thread* thread, address pc) {
   if (TracePcPatching) {
-    tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)sp())[-1], ((address *)sp())[-1], pc);
+    tty->print_cr("patch_pc at address" INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ",
+                  &((address *)sp())[-1], ((address *)sp())[-1], pc);
   }
   ((address *)sp())[-1] = pc;
   _cb = CodeCache::find_blob(pc);
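The print change fixes pointer truncation: "0x%x" formats only 32 bits, which silently drops the upper half of a 64-bit address. INTPTR_FORMAT is HotSpot's per-platform pointer format macro; the definition below is an illustrative approximation using <cinttypes>, not the VM's own:

#include <cinttypes>
#include <cstdio>

#define INTPTR_FORMAT "0x%016" PRIxPTR   // full pointer width on LP64

void show_pointer(void* p) {
  std::printf("ptr = " INTPTR_FORMAT "\n", reinterpret_cast<std::uintptr_t>(p));
}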
...
@@ -159,7 +159,7 @@ inline intptr_t** frame::interpreter_frame_locals_addr() const {
 inline intptr_t* frame::interpreter_frame_bcx_addr() const {
   assert(is_interpreted_frame(), "must be interpreted");
-  return (jint*) &(get_interpreterState()->_bcp);
+  return (intptr_t*) &(get_interpreterState()->_bcp);
 }
@@ -179,7 +179,7 @@ inline methodOop* frame::interpreter_frame_method_addr() const {
 inline intptr_t* frame::interpreter_frame_mdx_addr() const {
   assert(is_interpreted_frame(), "must be interpreted");
-  return (jint*) &(get_interpreterState()->_mdx);
+  return (intptr_t*) &(get_interpreterState()->_mdx);
 }

 // top of expression stack
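The cast change in these two accessors matters on 64-bit: _bcp and _mdx are word-sized slots, and reading them through a jint* exposes only 32 bits. An illustrative sketch, assuming an LP64 build (the struct is a stand-in, not the real interpreterState):

#include <cstdint>

struct InterpreterStateSketch {
  intptr_t _bcp;   // byte code pointer slot, one full word
  intptr_t _mdx;   // method data index slot, one full word
};

inline intptr_t* bcx_addr(InterpreterStateSketch* s) {
  return &s->_bcp;   // full word, correct on both 32- and 64-bit
  // a (jint*) cast here would alias only half the slot on LP64
}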
...
(Diffs for 5 more files in this changeset are collapsed and not shown.)