Commit 7fef4161 authored by iveresov

6964776: c2 should ensure the polling page is reachable on 64 bit

Summary: Materialize the pointer to the polling page in a register instead of using rip-relative addressing when the distance from the code cache is larger than disp32.
Reviewed-by: never, kvn
Parent fdf76f6c
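The change hinges on a simple reachability test: rip-relative addressing for the safepoint poll is only safe if the polling page lies within a signed 32-bit displacement of every possible code location, i.e. of both ends of the code cache. A minimal standalone sketch of that condition (illustrative only; the helper and parameter names below are not part of the patch, which adds Assembler::is_polling_page_far() for this purpose):

#include <cstdint>

// Does v fit in a signed 32-bit displacement?
static bool fits_simm32(int64_t v) {
  return v == (int64_t)(int32_t)v;
}

// True if the poll page cannot be reached rip-relatively from everywhere in
// the code cache, so its address must be materialized in a register first.
static bool polling_page_is_far(intptr_t poll_page, intptr_t cc_low, intptr_t cc_high) {
  return !fits_simm32(poll_page - cc_low) || !fits_simm32(poll_page - cc_high);
}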
@@ -3510,7 +3510,6 @@ bool Assembler::reachable(AddressLiteral adr) {
   // anywhere in the codeCache then we are always reachable.
   // This would have to change if we ever save/restore shared code
   // to be more pessimistic.
   disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
   if (!is_simm32(disp)) return false;
   disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
@@ -3534,6 +3533,14 @@ bool Assembler::reachable(AddressLiteral adr) {
   return is_simm32(disp);
 }
 
+// Check if the polling page is not reachable from the code cache using rip-relative
+// addressing.
+bool Assembler::is_polling_page_far() {
+  intptr_t addr = (intptr_t)os::get_polling_page();
+  return !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
+         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
+}
+
 void Assembler::emit_data64(jlong data,
                             relocInfo::relocType rtype,
                             int format) {
@@ -6886,6 +6893,11 @@ void MacroAssembler::sign_extend_short(Register reg) {
   }
 }
 
+void MacroAssembler::testl(Register dst, AddressLiteral src) {
+  assert(reachable(src), "Address should be reachable");
+  testl(dst, as_Address(src));
+}
+
 //////////////////////////////////////////////////////////////////////////////////
 #ifndef SERIALGC
@@ -7121,17 +7133,6 @@ void MacroAssembler::subptr(Register dst, Register src) {
   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
 }
 
-void MacroAssembler::test32(Register src1, AddressLiteral src2) {
-  // src2 must be rval
-
-  if (reachable(src2)) {
-    testl(src1, as_Address(src2));
-  } else {
-    lea(rscratch1, src2);
-    testl(src1, Address(rscratch1, 0));
-  }
-}
-
 // C++ bool manipulation
 void MacroAssembler::testbool(Register dst) {
   if(sizeof(bool) == 1)
......
@@ -580,7 +580,6 @@ private:
   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
 
   bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
 
   // These are all easily abused and hence protected
@@ -683,6 +682,8 @@ private:
   static bool is_simm32(int32_t x) { return true; }
 #endif // _LP64
 
+  static bool is_polling_page_far() NOT_LP64({ return false;});
+
   // Generic instructions
   // Does 32bit or 64bit as needed for the platform. In some sense these
   // belong in macro assembler but there is no need for both varieties to exist
@@ -2094,7 +2095,10 @@ class MacroAssembler: public Assembler {
   void leal32(Register dst, Address src) { leal(dst, src); }
 
-  void test32(Register src1, AddressLiteral src2);
+  // Import other testl() methods from the parent class or else
+  // they will be hidden by the following overriding declaration.
+  using Assembler::testl;
+  void testl(Register dst, AddressLiteral src);
 
   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
......
@@ -648,12 +648,13 @@ void LIR_Assembler::return_op(LIR_Opr result) {
   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                               relocInfo::poll_return_type);
 
-  // NOTE: the requires that the polling page be reachable else the reloc
-  // goes to the movq that loads the address and not the faulting instruction
-  // which breaks the signal handler code
-  __ test32(rax, polling_page);
-
+  if (Assembler::is_polling_page_far()) {
+    __ lea(rscratch1, polling_page);
+    __ relocate(relocInfo::poll_return_type);
+    __ testl(rax, Address(rscratch1, 0));
+  } else {
+    __ testl(rax, polling_page);
+  }
   __ ret(0);
 }
@@ -661,20 +662,17 @@ void LIR_Assembler::return_op(LIR_Opr result) {
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                               relocInfo::poll_type);
-  if (info != NULL) {
+  guarantee(info != NULL, "Shouldn't be NULL");
+  int offset = __ offset();
+  if (Assembler::is_polling_page_far()) {
+    __ lea(rscratch1, polling_page);
+    offset = __ offset();
     add_debug_info_for_branch(info);
+    __ testl(rax, Address(rscratch1, 0));
   } else {
-    ShouldNotReachHere();
+    add_debug_info_for_branch(info);
+    __ testl(rax, polling_page);
   }
-  int offset = __ offset();
-  // NOTE: the requires that the polling page be reachable else the reloc
-  // goes to the movq that loads the address and not the faulting instruction
-  // which breaks the signal handler code
-  __ test32(rax, polling_page);
   return offset;
 }
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -519,7 +519,11 @@ class NativeReturnX: public NativeInstruction {
 class NativeTstRegMem: public NativeInstruction {
  public:
   enum Intel_specific_constants {
-    instruction_code_memXregl   = 0x85
+    instruction_rex_prefix_mask = 0xF0,
+    instruction_rex_prefix      = Assembler::REX,
+    instruction_code_memXregl   = 0x85,
+    modrm_mask                  = 0x38, // select reg from the ModRM byte
+    modrm_reg                   = 0x00  // rax
   };
 };
@@ -533,12 +537,25 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) =
                                           (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
 #ifdef AMD64
-  if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
-       ubyte_at(1) == 0x05 ) { // 00 rax 101
-    address fault = addr_at(6) + int_at(2);
-    return os::is_poll_address(fault);
+  if (Assembler::is_polling_page_far()) {
+    // two cases, depending on the choice of the base register in the address.
+    if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
+         ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
+         (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
+        ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
+        (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) {
+      return true;
+    } else {
+      return false;
+    }
   } else {
-    return false;
+    if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
+        ubyte_at(1) == 0x05) { // 00 rax 101
+      address fault = addr_at(6) + int_at(2);
+      return os::is_poll_address(fault);
+    } else {
+      return false;
+    }
   }
 #else
   return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
......
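After this change a safepoint poll can take one of two instruction shapes, which is why NativeInstruction::is_safepoint_poll() above has to accept both encodings: the near form testl rax, [rip + disp32] (bytes 0x85 0x05 disp32) and the far form, where the page address is first materialized in a register and the poll becomes testl rax, [reg], usually carrying a REX prefix. A rough standalone recognizer, sketched under the assumption that insn points at the first byte of the faulting instruction (the function and variable names here are illustrative, not HotSpot's):

#include <cstdint>

static bool looks_like_safepoint_poll(const uint8_t* insn, bool far_poll) {
  const uint8_t TESTL_MEM_REG = 0x85;   // test r/m32, r32
  if (far_poll) {
    // Optional REX prefix (0x40..0x4F), then testl with ModRM reg field 000 (rax).
    if ((insn[0] & 0xF0) == 0x40) {
      return insn[1] == TESTL_MEM_REG && (insn[2] & 0x38) == 0x00;
    }
    return insn[0] == TESTL_MEM_REG && (insn[1] & 0x38) == 0x00;
  }
  // Near form: testl rax, [rip + disp32]; ModRM 0x05 = mod 00, reg rax, rm 101.
  return insn[0] == TESTL_MEM_REG && insn[1] == 0x05;
}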
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,41 +198,44 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 #ifdef _LP64
-  typedef Assembler::WhichOperand WhichOperand;
-  WhichOperand which = (WhichOperand) format();
-  // This format is imm but it is really disp32
-  which = Assembler::disp32_operand;
-  address orig_addr = old_addr_for(addr(), src, dest);
-  NativeInstruction* oni = nativeInstruction_at(orig_addr);
-  int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
-  // This poll_addr is incorrect by the size of the instruction it is irrelevant
-  intptr_t poll_addr = (intptr_t)oni + *orig_disp;
-
-  NativeInstruction* ni = nativeInstruction_at(addr());
-  intptr_t new_disp = poll_addr - (intptr_t) ni;
-
-  int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
-  * disp = (int32_t)new_disp;
+  if (!Assembler::is_polling_page_far()) {
+    typedef Assembler::WhichOperand WhichOperand;
+    WhichOperand which = (WhichOperand) format();
+    // This format is imm but it is really disp32
+    which = Assembler::disp32_operand;
+    address orig_addr = old_addr_for(addr(), src, dest);
+    NativeInstruction* oni = nativeInstruction_at(orig_addr);
+    int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
+    // This poll_addr is incorrect by the size of the instruction it is irrelevant
+    intptr_t poll_addr = (intptr_t)oni + *orig_disp;
+
+    NativeInstruction* ni = nativeInstruction_at(addr());
+    intptr_t new_disp = poll_addr - (intptr_t) ni;
+
+    int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
+    * disp = (int32_t)new_disp;
+  }
 #endif // _LP64
 }
 
 void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 #ifdef _LP64
-  typedef Assembler::WhichOperand WhichOperand;
-  WhichOperand which = (WhichOperand) format();
-  // This format is imm but it is really disp32
-  which = Assembler::disp32_operand;
-  address orig_addr = old_addr_for(addr(), src, dest);
-  NativeInstruction* oni = nativeInstruction_at(orig_addr);
-  int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
-  // This poll_addr is incorrect by the size of the instruction it is irrelevant
-  intptr_t poll_addr = (intptr_t)oni + *orig_disp;
-
-  NativeInstruction* ni = nativeInstruction_at(addr());
-  intptr_t new_disp = poll_addr - (intptr_t) ni;
-
-  int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
-  * disp = (int32_t)new_disp;
+  if (!Assembler::is_polling_page_far()) {
+    typedef Assembler::WhichOperand WhichOperand;
+    WhichOperand which = (WhichOperand) format();
+    // This format is imm but it is really disp32
+    which = Assembler::disp32_operand;
+    address orig_addr = old_addr_for(addr(), src, dest);
+    NativeInstruction* oni = nativeInstruction_at(orig_addr);
+    int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
+    // This poll_addr is incorrect by the size of the instruction it is irrelevant
+    intptr_t poll_addr = (intptr_t)oni + *orig_disp;
+
+    NativeInstruction* ni = nativeInstruction_at(addr());
+    intptr_t new_disp = poll_addr - (intptr_t) ni;
+
+    int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
+    * disp = (int32_t)new_disp;
+  }
 #endif // _LP64
 }
@@ -574,12 +574,11 @@ int MachCallDynamicJavaNode::ret_addr_offset()
 // In os_cpu .ad file
 // int MachCallRuntimeNode::ret_addr_offset()
 
-// Indicate if the safepoint node needs the polling page as an input.
-// Since amd64 does not have absolute addressing but RIP-relative
-// addressing and the polling page is within 2G, it doesn't.
+// Indicate if the safepoint node needs the polling page as an input,
+// it does if the polling page is more than disp32 away.
 bool SafePointNode::needs_polling_address_input()
 {
-  return false;
+  return Assembler::is_polling_page_far();
 }
 
 //
@@ -992,15 +991,21 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
   framesize -= 2*wordSize;
 
   if (framesize) {
-    st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
+    st->print_cr("addq rsp, %d\t# Destroy frame", framesize);
     st->print("\t");
   }
 
-  st->print_cr("popq\trbp");
+  st->print_cr("popq rbp");
   if (do_polling() && C->is_method_compilation()) {
-    st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
-                 "# Safepoint: poll for GC");
     st->print("\t");
+    if (Assembler::is_polling_page_far()) {
+      st->print_cr("movq rscratch1, #polling_page_address\n\t"
+                   "testl rax, [rscratch1]\t"
+                   "# Safepoint: poll for GC");
+    } else {
+      st->print_cr("testl rax, [rip + #offset_to_poll_page]\t"
+                   "# Safepoint: poll for GC");
+    }
   }
 }
 #endif
@@ -1033,45 +1038,22 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
   emit_opcode(cbuf, 0x58 | RBP_enc);
 
   if (do_polling() && C->is_method_compilation()) {
-    // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
-    // XXX reg_mem doesn't support RIP-relative addressing yet
-    cbuf.set_insts_mark();
-    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX
-    emit_opcode(cbuf, 0x85); // testl
-    emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
-    // cbuf.insts_mark() is beginning of instruction
-    emit_d32_reloc(cbuf, os::get_polling_page());
-    //                    relocInfo::poll_return_type,
+    MacroAssembler _masm(&cbuf);
+    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
+    if (Assembler::is_polling_page_far()) {
+      __ lea(rscratch1, polling_page);
+      __ relocate(relocInfo::poll_return_type);
+      __ testl(rax, Address(rscratch1, 0));
+    } else {
+      __ testl(rax, polling_page);
+    }
   }
 }
 
 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
 {
-  Compile* C = ra_->C;
-  int framesize = C->frame_slots() << LogBytesPerInt;
-  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-
-  // Remove word for return adr already pushed
-  // and RBP
-  framesize -= 2*wordSize;
-
-  uint size = 0;
-
-  if (do_polling() && C->is_method_compilation()) {
-    size += 6;
-  }
-
-  // count popq rbp
-  size++;
-
-  if (framesize) {
-    if (framesize < 0x80) {
-      size += 4;
-    } else if (framesize) {
-      size += 7;
-    }
-  }
-
-  return size;
+  return MachNode::size(ra_); // too many variables; just compute it
+                              // the hard way
 }
 
 int MachEpilogNode::reloc() const
@@ -3410,8 +3392,8 @@ encode %{
     }
     if (EmitSync & 1) {
       // Without cast to int32_t a movptr will destroy r10 which is typically obj
      masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
      masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
     } else
     if (EmitSync & 2) {
        Label DONE_LABEL;
@@ -3439,10 +3421,10 @@ encode %{
     } else {
       Label DONE_LABEL, IsInflated, Egress;
 
       masm.movptr(tmpReg, Address(objReg, 0)) ;
       masm.testl (tmpReg, 0x02) ;          // inflated vs stack-locked|neutral|biased
       masm.jcc   (Assembler::notZero, IsInflated) ;
 
       // it's stack-locked, biased or neutral
       // TODO: optimize markword triage order to reduce the number of
       // conditional branches in the most common cases.
@@ -3456,9 +3438,9 @@ encode %{
       }
 
       // was q will it destroy high?
       masm.orl   (tmpReg, 1) ;
       masm.movptr(Address(boxReg, 0), tmpReg) ;
       if (os::is_MP()) { masm.lock(); }
       masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
       if (_counters != NULL) {
         masm.cond_inc32(Assembler::equal,
@@ -3485,16 +3467,16 @@ encode %{
       // fetched _owner. If the CAS is successful we may
       // avoid an RTO->RTS upgrade on the $line.
       // Without cast to int32_t a movptr will destroy r10 which is typically obj
       masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
 
       masm.mov    (boxReg, tmpReg) ;
       masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
       masm.testptr(tmpReg, tmpReg) ;
       masm.jcc    (Assembler::notZero, DONE_LABEL) ;
 
       // It's inflated and appears unlocked
       if (os::is_MP()) { masm.lock(); }
       masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
       // Intentional fall-through into DONE_LABEL ...
 
       masm.bind  (DONE_LABEL) ;
@@ -3513,8 +3495,8 @@ encode %{
     Register tmpReg = as_Register($tmp$$reg);
     MacroAssembler masm(&cbuf);
     if (EmitSync & 4) {
       masm.cmpptr(rsp, 0) ;
     } else
     if (EmitSync & 8) {
       Label DONE_LABEL;
@@ -3541,25 +3523,25 @@ encode %{
     if (UseBiasedLocking && !UseOptoBiasInlining) {
       masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
     }
 
     masm.movptr(tmpReg, Address(objReg, 0)) ;
     masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
     masm.jcc   (Assembler::zero, DONE_LABEL) ;
     masm.testl (tmpReg, 0x02) ;
     masm.jcc   (Assembler::zero, Stacked) ;
 
     // It's inflated
     masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
     masm.xorptr(boxReg, r15_thread) ;
     masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
     masm.jcc   (Assembler::notZero, DONE_LABEL) ;
     masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
     masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
     masm.jcc   (Assembler::notZero, CheckSucc) ;
     masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
     masm.jmp   (DONE_LABEL) ;
 
     if ((EmitSync & 65536) == 0) {
       Label LSuccess, LGoSlowPath ;
       masm.bind  (CheckSucc) ;
       masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
@@ -3591,9 +3573,9 @@ encode %{
       masm.jmp   (DONE_LABEL) ;
     }
 
     masm.bind  (Stacked) ;
     masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
     if (os::is_MP()) { masm.lock(); }
     masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
     if (EmitSync & 65536) {
@@ -3914,22 +3896,6 @@ encode %{
   // done:
   %}
 
-  // Safepoint Poll. This polls the safepoint page, and causes an
-  // exception if it is not readable. Unfortunately, it kills
-  // RFLAGS in the process.
-  enc_class enc_safepoint_poll
-  %{
-    // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
-    // XXX reg_mem doesn't support RIP-relative addressing yet
-    cbuf.set_insts_mark();
-    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX
-    emit_opcode(cbuf, 0x85); // testl
-    emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
-    // cbuf.insts_mark() is beginning of instruction
-    emit_d32_reloc(cbuf, os::get_polling_page());
-    //                    relocInfo::poll_type,
-  %}
-
 %}
@@ -4233,6 +4199,15 @@ operand immP0()
   interface(CONST_INTER);
 %}
 
+operand immP_poll() %{
+  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
+  match(ConP);
+
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Pointer Immediate
 operand immN() %{
   match(ConN);
@@ -4840,7 +4815,7 @@ operand regF()
 %}
 
 // Double register operands
 operand regD()
 %{
   constraint(ALLOC_IN_RC(double_reg));
   match(RegD);
@@ -6568,6 +6543,16 @@ instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr)
   ins_pipe(ialu_reg);
 %}
 
+instruct loadConP_poll(rRegP dst, immP_poll src) %{
+  match(Set dst src);
+  format %{ "movq $dst, $src\t!ptr" %}
+  ins_encode %{
+    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
+    __ lea($dst$$Register, polling_page);
+  %}
+  ins_pipe(ialu_reg_fat);
+%}
+
 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
 %{
   match(Set dst src);
@@ -7241,11 +7226,11 @@ instruct bytes_reverse_long(rRegL dst) %{
 instruct bytes_reverse_unsigned_short(rRegI dst) %{
   match(Set dst (ReverseBytesUS dst));
 
   format %{ "bswapl $dst\n\t"
             "shrl $dst,16\n\t" %}
   ins_encode %{
     __ bswapl($dst$$Register);
     __ shrl($dst$$Register, 16);
   %}
   ins_pipe( ialu_reg );
 %}
@@ -7253,11 +7238,11 @@ instruct bytes_reverse_unsigned_short(rRegI dst) %{
 instruct bytes_reverse_short(rRegI dst) %{
   match(Set dst (ReverseBytesS dst));
 
   format %{ "bswapl $dst\n\t"
             "sar $dst,16\n\t" %}
   ins_encode %{
     __ bswapl($dst$$Register);
     __ sarl($dst$$Register, 16);
   %}
   ins_pipe( ialu_reg );
 %}
@@ -7480,7 +7465,7 @@ instruct membar_volatile(rFlagsReg cr) %{
   effect(KILL cr);
   ins_cost(400);
 
   format %{
     $$template
     if (os::is_MP()) {
       $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
@@ -8291,7 +8276,7 @@ instruct storePConditional(memory heap_top_ptr,
                            rFlagsReg cr)
 %{
   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
 
   format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
             "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
   opcode(0x0F, 0xB1);
@@ -9854,9 +9839,9 @@ instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 // Xor Register with Immediate -1
 instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
   match(Set dst (XorI dst imm));
 
   format %{ "not $dst" %}
   ins_encode %{
     __ notl($dst$$Register);
   %}
@@ -10097,9 +10082,9 @@ instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
 // Xor Register with Immediate -1
 instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
   match(Set dst (XorL dst imm));
 
   format %{ "notq $dst" %}
   ins_encode %{
     __ notq($dst$$Register);
   %}
@@ -12473,14 +12458,33 @@ instruct cmpFastUnlock(rFlagsReg cr,
 // Safepoint Instructions
 instruct safePoint_poll(rFlagsReg cr)
 %{
+  predicate(!Assembler::is_polling_page_far());
   match(SafePoint);
   effect(KILL cr);
 
   format %{ "testl rax, [rip + #offset_to_poll_page]\t"
             "# Safepoint: poll for GC" %}
-  size(6); // Opcode + ModRM + Disp32 == 6 bytes
   ins_cost(125);
-  ins_encode(enc_safepoint_poll);
+  ins_encode %{
+    AddressLiteral addr(os::get_polling_page(), relocInfo::poll_type);
+    __ testl(rax, addr);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
+%{
+  predicate(Assembler::is_polling_page_far());
+  match(SafePoint poll);
+  effect(KILL cr, USE poll);
+
+  format %{ "testl rax, [$poll]\t"
+            "# Safepoint: poll for GC" %}
+  ins_cost(125);
+  ins_encode %{
+    __ relocate(relocInfo::poll_type);
+    __ testl(rax, Address($poll$$Register, 0));
+  %}
   ins_pipe(ialu_reg_mem);
 %}
......