Commit d063f06d authored by R roland

6769124: various 64-bit fixes for c1

Reviewed-by: never
Parent 59e1c413
@@ -189,14 +189,17 @@ void LIR_Assembler::osr_entry() {
     Register OSR_buf = osrBufferPointer()->as_register();
     { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
       int monitor_offset = BytesPerWord * method()->max_locals() +
-        (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+        (2 * BytesPerWord) * (number_of_locks - 1);
+      // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+      // the OSR buffer using 2 word entries: first the lock and then
+      // the oop.
       for (int i = 0; i < number_of_locks; i++) {
-        int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+        int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
         // verify the interpreter's monitor has a non-null object
         {
           Label L;
-          __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+          __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
           __ cmp(G0, O7);
           __ br(Assembler::notEqual, false, Assembler::pt, L);
           __ delayed()->nop();
@@ -205,9 +208,9 @@ void LIR_Assembler::osr_entry() {
         }
 #endif // ASSERT
         // Copy the lock field into the compiled activation.
-        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
+        __ ld_ptr(OSR_buf, slot_offset + 0, O7);
         __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
-        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
         __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
       }
     }
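A minimal standalone sketch of the slot arithmetic used above (not HotSpot code; BytesPerWord, max_locals and number_of_locks are made-up values): SharedRuntime::OSR_migration_begin() stores each monitor as two consecutive words, the lock word first and the object second, so the i-th monitor sits 2 * BytesPerWord * i below monitor_offset.

#include <cstdio>

int main() {
  const int BytesPerWord    = 8;  // assuming a 64-bit VM
  const int max_locals      = 3;  // hypothetical method
  const int number_of_locks = 2;

  // Mirrors the formula in the hunk above.
  int monitor_offset = BytesPerWord * max_locals +
                       (2 * BytesPerWord) * (number_of_locks - 1);
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2) * BytesPerWord;
    std::printf("monitor %d: lock word at buffer offset %d, object at %d\n",
                i, slot_offset + 0, slot_offset + 1 * BytesPerWord);
  }
  return 0;
}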
@@ -953,9 +956,11 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
   } else {
 #ifdef _LP64
     assert(base != to_reg->as_register_lo(), "can't handle this");
+    assert(O7 != to_reg->as_register_lo(), "can't handle this");
     __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
+    __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
     __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
-    __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
+    __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
 #else
     if (base == to_reg->as_register_lo()) {
       __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
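The fix above matters because the old sequence's final ld overwrote the register that already held the shifted high word. The new sequence loads the low half zero-extended into the scratch register O7 and ORs the two halves together. A hedged C++ sketch of that composition (illustrative only, values made up):

#include <cstdint>
#include <cstdio>

// Compose a 64-bit long from its two 32-bit halves the way the fixed SPARC
// sequence does: load the high word, load the low word zero-extended (lduw),
// shift the high word left by 32 (sllx), then OR in the low word (or3).
int64_t compose_long(uint32_t hi_word, uint32_t lo_word) {
  uint64_t reg     = hi_word;   // high half (ld's sign-extension is discarded by the shift below)
  uint64_t scratch = lo_word;   // low half, zero-extended like lduw into O7
  reg = reg << 32;              // sllx reg, 32, reg
  reg = reg | scratch;          // or3  reg, scratch, reg
  return (int64_t)reg;
}

int main() {
  // Hypothetical halves of 0x0000000200000003 (cf. TestUnalignedLoad6769124's l1v).
  std::printf("0x%016llx\n", (unsigned long long)compose_long(0x2u, 0x3u));
  return 0;
}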
@@ -976,8 +981,8 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
     FloatRegister reg = to_reg->as_double_reg();
     // split unaligned loads
     if (unaligned || PatchALot) {
-      __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
+      __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
       __ ldf(FloatRegisterImpl::S, base, offset, reg);
     } else {
       __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
     }
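The offset change above is needed because BytesPerWord is 8 on a 64-bit VM, while the second 32-bit half of an 8-byte double always lives 4 bytes past the first half. A small illustrative sketch (not HotSpot code; big-endian word order as on SPARC is assumed, buffer contents are made up):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Read a big-endian 32-bit word starting at p.
static uint32_t read_be32(const unsigned char* p) {
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
         ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

// Reassemble an 8-byte double stored big-endian at an arbitrary offset by
// loading its two 4-byte halves separately, as the split ldf sequence does.
static double load_split_double(const unsigned char* base, int offset) {
  uint32_t hi = read_be32(base + offset);      // first half at offset
  uint32_t lo = read_be32(base + offset + 4);  // second half at offset + 4, not offset + BytesPerWord
  uint64_t bits = ((uint64_t)hi << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  // Store 1.5 big-endian at a deliberately unaligned offset, then reload it.
  unsigned char buf[16] = {0};
  double v = 1.5;
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  for (int i = 0; i < 8; i++) buf[3 + i] = (unsigned char)(bits >> (56 - 8 * i));
  std::printf("%f\n", load_split_double(buf, 3));
  return 0;
}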
@@ -2200,6 +2205,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     Register len     = O2;
 
     __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
+    LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
     if (shift == 0) {
       __ add(src_ptr, src_pos, src_ptr);
     } else {
@@ -2208,6 +2214,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     }
     __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
+    LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
     if (shift == 0) {
       __ add(dst_ptr, dst_pos, dst_ptr);
     } else {
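The two sra instructions added above deal with the same hazard that the movl2ptr calls in the x86 arraycopy hunk further below address: a 32-bit int held in a 64-bit register may carry stale upper bits, and those bits corrupt 64-bit address arithmetic unless the value is re-sign-extended first (sra reg, 0, reg on SPARC v9 does exactly that). A hedged C++ illustration with made-up register and base values:

#include <cstdint>
#include <cstdio>

int main() {
  // A 64-bit register that is only guaranteed to hold a valid value in its
  // low 32 bits (the int 4); the upper half is hypothetical garbage.
  uint64_t pos_reg = 0xdeadbeef00000004ULL;
  uint64_t base    = 0x0000100000000000ULL;  // made-up array base address
  int      shift   = 2;                      // int[] elements: scale by 4

  // Using the register directly in 64-bit address arithmetic goes wrong:
  uint64_t bad_addr  = base + (pos_reg << shift);

  // Sign-extending the low 32 bits first (sra pos, 0, pos / movl2ptr)
  // discards the stale upper half; in C++ that is a cast through int32_t.
  int64_t  pos       = (int64_t)(int32_t)(uint32_t)pos_reg;
  uint64_t good_addr = base + ((uint64_t)pos << shift);

  std::printf("bad  = 0x%016llx\ngood = 0x%016llx\n",
              (unsigned long long)bad_addr, (unsigned long long)good_addr);
  return 0;
}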
......
@@ -144,17 +144,17 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
   if (index->is_register()) {
     // apply the shift and accumulate the displacement
     if (shift > 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index, shift, tmp);
       index = tmp;
     }
     if (disp != 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       if (Assembler::is_simm13(disp)) {
-        __ add(tmp, LIR_OprFact::intConst(disp), tmp);
+        __ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
         index = tmp;
       } else {
-        __ move(LIR_OprFact::intConst(disp), tmp);
+        __ move(LIR_OprFact::intptrConst(disp), tmp);
         __ add(tmp, index, tmp);
         index = tmp;
       }
@@ -162,8 +162,8 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
     }
   } else if (disp != 0 && !Assembler::is_simm13(disp)) {
     // index is illegal so replace it with the displacement loaded into a register
-    index = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(disp), index);
+    index = new_pointer_register();
+    __ move(LIR_OprFact::intptrConst(disp), index);
     disp = 0;
   }
......
@@ -2251,6 +2251,7 @@ void Assembler::popf() {
   emit_byte(0x9D);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::popl(Address dst) {
   // NOTE: this will adjust stack by 8byte on 64bits
   InstructionMark im(this);
@@ -2258,6 +2259,7 @@ void Assembler::popl(Address dst) {
   emit_byte(0x8F);
   emit_operand(rax, dst);
 }
+#endif
 
 void Assembler::prefetch_prefix(Address src) {
   prefix(src);
@@ -2428,6 +2430,7 @@ void Assembler::pushf() {
   emit_byte(0x9C);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::pushl(Address src) {
   // Note this will push 64bit on 64bit
   InstructionMark im(this);
@@ -2435,6 +2438,7 @@ void Assembler::pushl(Address src) {
   emit_byte(0xFF);
   emit_operand(rsi, src);
 }
+#endif
 
 void Assembler::pxor(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -5591,7 +5595,12 @@ void MacroAssembler::align(int modulus) {
 }
 
 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
-  andpd(dst, as_Address(src));
+  if (reachable(src)) {
+    andpd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    andpd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@ void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
 }
 
 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
-  comisd(dst, as_Address(src));
+  if (reachable(src)) {
+    comisd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comisd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
-  comiss(dst, as_Address(src));
+  if (reachable(src)) {
+    comiss(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comiss(dst, Address(rscratch1, 0));
+  }
 }
......
@@ -1244,7 +1244,9 @@ private:
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void popl(Address dst);
+#endif
 
 #ifdef _LP64
   void popq(Address dst);
@@ -1285,7 +1287,9 @@ private:
   // Interleave Low Bytes
   void punpcklbw(XMMRegister dst, XMMRegister src);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void pushl(Address src);
+#endif
 
   void pushq(Address src);
......
@@ -301,22 +301,25 @@ void LIR_Assembler::osr_entry() {
     Register OSR_buf = osrBufferPointer()->as_pointer_register();
     { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
       int monitor_offset = BytesPerWord * method()->max_locals() +
-        (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+        (2 * BytesPerWord) * (number_of_locks - 1);
+      // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+      // the OSR buffer using 2 word entries: first the lock and then
+      // the oop.
       for (int i = 0; i < number_of_locks; i++) {
-        int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+        int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
         // verify the interpreter's monitor has a non-null object
         {
           Label L;
-          __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+          __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
           __ jcc(Assembler::notZero, L);
           __ stop("locked object is NULL");
           __ bind(L);
         }
 #endif
-        __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+        __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
         __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
-        __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+        __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
         __ movptr(frame_map()->address_for_monitor_object(i), rbx);
       }
     }
@@ -785,7 +788,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
         ShouldNotReachHere();
         __ movoop(as_Address(addr, noreg), c->as_jobject());
       } else {
+#ifdef _LP64
+        __ movoop(rscratch1, c->as_jobject());
+        null_check_here = code_offset();
+        __ movptr(as_Address_lo(addr), rscratch1);
+#else
         __ movoop(as_Address(addr), c->as_jobject());
+#endif
       }
     }
     break;
@@ -1118,8 +1127,14 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
+#ifndef _LP64
       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+#else
+      //no pushl on 64bits
+      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
+      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
+#endif
     }
 
   } else if (src->is_double_stack()) {
@@ -3136,8 +3151,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 #ifdef _LP64
     assert_different_registers(c_rarg0, dst, dst_pos, length);
+    __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
     __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
     assert_different_registers(c_rarg1, length);
+    __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
     __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
     __ mov(c_rarg2, length);
......
@@ -755,8 +755,19 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   }
 
   LIR_Opr addr = new_pointer_register();
-  __ move(obj.result(), addr);
-  __ add(addr, offset.result(), addr);
+  LIR_Address* a;
+  if(offset.result()->is_constant()) {
+    a = new LIR_Address(obj.result(),
+                        NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
+                        as_BasicType(type));
+  } else {
+    a = new LIR_Address(obj.result(),
+                        offset.result(),
+                        LIR_Address::times_1,
+                        0,
+                        as_BasicType(type));
+  }
+  __ leal(LIR_OprFact::address(a), addr);
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
     // Do the pre-write barrier, if any.
......
@@ -255,6 +255,8 @@ void VM_Version::get_processor_features() {
   if (!VM_Version::supports_sse2()) {
     vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
   }
+  // in 64 bit the use of SSE2 is the minimum
+  if (UseSSE < 2) UseSSE = 2;
 #endif
 
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
......
@@ -365,7 +365,7 @@ void BlockListBuilder::make_loop_header(BlockBegin* block) {
     if (_next_loop_index < 31) _next_loop_index++;
   } else {
     // block already marked as loop header
-    assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
+    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
   }
 }
......
@@ -1855,12 +1855,26 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
+#ifdef _LP64
+    if (!index_op->is_illegal() && index_op->type() == T_INT) {
+      LIR_Opr tmp = new_pointer_register();
+      __ convert(Bytecodes::_i2l, index_op, tmp);
+      index_op = tmp;
+    }
+#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
+#ifdef _LP64
+      if (!index_op->is_illegal() && index_op->type() == T_INT) {
+        LIR_Opr tmp = new_pointer_register();
+        __ convert(Bytecodes::_i2l, index_op, tmp);
+        index_op = tmp;
+      }
+#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index_op, log2_scale, tmp);
       addr = new LIR_Address(base_op, tmp, dst_type);
     }
@@ -1915,10 +1929,25 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_register(T_INT);
-    __ move(idx.result(), index_op);
+    index_op = new_pointer_register();
+#ifdef _LP64
+    if(idx.result()->type() == T_INT) {
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    } else {
+#endif
+      __ move(idx.result(), index_op);
+#ifdef _LP64
+    }
+#endif
     __ shift_left(index_op, log2_scale, index_op);
   }
+#ifdef _LP64
+  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
+    LIR_Opr tmp = new_pointer_register();
+    __ convert(Bytecodes::_i2l, index_op, tmp);
+    index_op = tmp;
+  }
+#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);
......
@@ -2464,6 +2464,10 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
     case T_LONG: // fall through
     case T_DOUBLE: {
+#ifdef _LP64
+      scope_values->append(&_int_0_scope_value);
+      scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
+#else
       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
@@ -2471,7 +2475,7 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
       }
+#endif
       return 2;
     }
@@ -2503,17 +2507,18 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
   } else if (opr->is_single_cpu()) {
     bool is_oop = opr->is_oop_register();
     int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
+    Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
     ScopeValue* sv = _scope_value_cache.at(cache_idx);
     if (sv == NULL) {
-      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
+      Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
       VMReg rname = frame_map()->regname(opr);
       sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
       _scope_value_cache.at_put(cache_idx, sv);
     }
 
     // check if cached value is correct
-    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
+    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
     scope_values->append(sv);
     return 1;
......
@@ -1234,9 +1234,11 @@ void Arguments::set_ergonomics_flags() {
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
+#ifndef COMPILER1
     if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
      FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
+#endif
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
       // Cannot allocate guard pages for implicit checks in indexed addressing
@@ -2675,6 +2677,10 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     }
   }
 
+#if defined(_LP64) && defined(COMPILER1)
+  UseCompressedOops = false;
+#endif
+
 #ifdef SERIALGC
   set_serial_gc_flags();
 #endif // SERIALGC
......
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6769124
* @summary arraycopy may crash the VM with c1 on 64 bit
*/
public class TestArrayCopy6769124 {
public static void main(String[] args) {
int k = 1 << 31;
for(int j = 0; j <1000000; j++) {
int i = -1;
while(i < 10) {
i++;
}
int m = k * i;
int[] O1 = new int[20];
int[] O2 = new int[20];
System.arraycopy(O1, i, O2, i, 1); //will crash on amd64
System.arraycopy(O1, m, O2, m, 1); //will crash on sparcv9
}
}
}
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6769124
* @summary int value might not be correctly decoded on deopt with c1 on 64 bit
*
* @run main/othervm -Xcomp -XX:CompileOnly=TestDeoptInt6769124.m TestDeoptInt6769124
*/
public class TestDeoptInt6769124 {
static class A {
volatile int vl;
A(int v) {
vl = v;
}
}
static void m(int b) {
A a = new A(10);
int c;
c = b + a.vl; //accessing volatile field of class not loaded at compile time forces a deopt
if(c != 20) {
System.out.println("a (= " + a.vl + ") + b (= " + b + ") = c (= " + c + ") != 20");
throw new InternalError();
}
}
public static void main(String[] args) {
m(10);
}
}
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6769124
* @summary unaligned load may fail with c1 on 64 bit
*/
public class TestUnalignedLoad6769124 {
static long l1v = 0x200000003L;
static long l2v = 0x400000005L;
static double d1v = Double.MAX_VALUE;
static double d2v = Double.MIN_VALUE;
public static void main(String[] args) {
long l1 = l1v;
double d1 = d1v;
long l2 = l2v;
double d2 = d2v;
// Run long enough to induce an OSR
for (int i = 0; i < 10000000; i++) {
}
boolean error = false;
if (l1 != l1v) {
System.out.println(l1 + " != " + l1v);
error = true;
}
if (l2 != l2v) {
System.out.println(l2 + " != " + l2v);
error = true;
}
if (d1 != d1v) {
System.out.println(d1 + " != " + d1v);
error = true;
}
if (d2 != d2v) {
System.out.println(d2 + " != " + d2v);
error = true;
}
if (error) {
throw new InternalError();
}
}
}