Commit c8857644, authored by twisti

7174218: remove AtomicLongCSImpl intrinsics

Reviewed-by: kvn, twisti
Contributed-by: Krystal Mok <sajia@taobao.com>
Parent f658b5f4
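
For context (not part of the patch): sun.misc.AtomicLongCSImpl carried a long "value" field plus get()J and attemptUpdate(JJ)Z, and it is the hand-written C1/C2 intrinsics for those two methods that the hunks below delete. The same lock-free 64-bit compare-and-swap is available through java.util.concurrent.atomic.AtomicLong, whose compareAndSet goes through the Unsafe compare-and-swap path this change keeps (the do_CompareAndSwap(x, longType) case visible below). A minimal Java sketch of the equivalent usage; the class and variable names are illustrative only:

// Illustrative only, not part of the commit: the 64-bit CAS that
// sun.misc.AtomicLongCSImpl.attemptUpdate(JJ)Z used to provide is what
// java.util.concurrent.atomic.AtomicLong.compareAndSet offers today.
import java.util.concurrent.atomic.AtomicLong;

public class AttemptUpdateSketch {
    public static void main(String[] args) {
        AtomicLong value = new AtomicLong(0L);

        long expected = value.get();
        // Equivalent of the removed attemptUpdate(expected, expected + 1):
        // store the new value only if the field still holds 'expected'.
        boolean swapped = value.compareAndSet(expected, expected + 1);

        System.out.println("swapped=" + swapped + ", value=" + value.get());
    }
}
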
...@@ -644,30 +644,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
LIRItem obj (x->argument_at(0), this); // AtomicLong object
LIRItem cmp_value (x->argument_at(1), this); // value to compare with field
LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value
obj.load_item();
cmp_value.load_item();
new_value.load_item();
// generate compare-and-swap and produce zero condition if swap occurs
int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
LIR_Opr addr = FrameMap::O7_opr;
__ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);
LIR_Opr t1 = FrameMap::G1_opr; // temp for 64-bit value
LIR_Opr t2 = FrameMap::G3_opr; // temp for 64-bit value
__ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
......
...@@ -827,7 +827,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
// a Load
// inputs are (0:control, 1:memory, 2:address)
if (!(n->ideal_Opcode()==ld_op) && // Following are special cases
!(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
!(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) &&
!(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) &&
...@@ -7306,17 +7305,6 @@ instruct loadPLocked(iRegP dst, memory mem) %{
ins_pipe(iload_mem);
%}
// LoadL-locked. Same as a regular long load when used with a compare-swap
instruct loadLLocked(iRegL dst, memory mem) %{
match(Set dst (LoadLLocked mem));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDX $mem,$dst\t! long" %}
opcode(Assembler::ldx_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
effect( KILL newval );
......
...@@ -718,35 +718,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
LIRItem obj (x->argument_at(0), this); // AtomicLong object
LIRItem cmp_value (x->argument_at(1), this); // value to compare with field
LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value
// compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
cmp_value.load_item_force(FrameMap::long0_opr);
// new value must be in rcx,ebx (hi,lo)
new_value.load_item_force(FrameMap::long1_opr);
// object pointer register is overwritten with field address
obj.load_item();
// generate compare-and-swap; produces zero condition if swap occurs
int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
LIR_Opr addr = new_pointer_register();
__ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed
LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed
__ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
......
...@@ -7800,50 +7800,6 @@ instruct loadPLocked(eRegP dst, memory mem) %{
ins_pipe( ialu_reg_mem );
%}
// LoadLong-locked - same as a volatile long load when used with compare-swap
instruct loadLLocked(stackSlotL dst, memory mem) %{
predicate(UseSSE<=1);
match(Set dst (LoadLLocked mem));
ins_cost(200);
format %{ "FILD $mem\t# Atomic volatile long load\n\t"
"FISTp $dst" %}
ins_encode(enc_loadL_volatile(mem,dst));
ins_pipe( fpu_reg_mem );
%}
instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{
predicate(UseSSE>=2);
match(Set dst (LoadLLocked mem));
effect(TEMP tmp);
ins_cost(180);
format %{ "MOVSD $tmp,$mem\t# Atomic volatile long load\n\t"
"MOVSD $dst,$tmp" %}
ins_encode %{
__ movdbl($tmp$$XMMRegister, $mem$$Address);
__ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{
predicate(UseSSE>=2);
match(Set dst (LoadLLocked mem));
effect(TEMP tmp);
ins_cost(160);
format %{ "MOVSD $tmp,$mem\t# Atomic volatile long load\n\t"
"MOVD $dst.lo,$tmp\n\t"
"PSRLQ $tmp,32\n\t"
"MOVD $dst.hi,$tmp" %}
ins_encode %{
__ movdbl($tmp$$XMMRegister, $mem$$Address);
__ movdl($dst$$Register, $tmp$$XMMRegister);
__ psrlq($tmp$$XMMRegister, 32);
__ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
......
...@@ -7497,18 +7497,6 @@ instruct loadPLocked(rRegP dst, memory mem)
ins_pipe(ialu_reg_mem); // XXX
%}
// LoadL-locked - same as a regular LoadL when used with compare-swap
instruct loadLLocked(rRegL dst, memory mem)
%{
match(Set dst (LoadLLocked mem));
ins_cost(125); // XXX
format %{ "movq $dst, $mem\t# long locked" %}
opcode(0x8B);
ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
ins_pipe(ialu_reg_mem); // XXX
%}
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
......
...@@ -261,7 +261,6 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
if( strcmp(opType,"LoadL")==0 ) return Form::idealL;
if( strcmp(opType,"LoadL_unaligned")==0 ) return Form::idealL;
if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;
if( strcmp(opType,"LoadP")==0 ) return Form::idealP; if( strcmp(opType,"LoadP")==0 ) return Form::idealP;
if( strcmp(opType,"LoadN")==0 ) return Form::idealN; if( strcmp(opType,"LoadN")==0 ) return Form::idealN;
if( strcmp(opType,"LoadRange")==0 ) return Form::idealI; if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;
......
...@@ -3387,7 +3387,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
"Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S",
"LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",
"LoadPLocked", "LoadLLocked",
"LoadPLocked",
"StorePConditional", "StoreIConditional", "StoreLConditional",
"CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
"StoreCM",
......
...@@ -3195,13 +3195,6 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
preserves_state = true;
break;
// sun/misc/AtomicLong.attemptUpdate
case vmIntrinsics::_attemptUpdate :
if (!VM_Version::supports_cx8()) return false;
if (!InlineAtomicLong) return false;
preserves_state = true;
break;
// Use special nodes for Unsafe instructions so we can more easily
// perform an address-mode optimization on the raw variants
case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false);
......
...@@ -3009,11 +3009,6 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_CompareAndSwap(x, longType);
break;
// sun.misc.AtomicLongCSImpl.attemptUpdate
case vmIntrinsics::_attemptUpdate:
do_AttemptUpdate(x);
break;
case vmIntrinsics::_Reference_get:
do_Reference_get(x);
break;
...@@ -3254,4 +3249,3 @@ void LIRGenerator::do_MemBar(MemBar* x) {
}
}
}
...@@ -244,7 +244,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_MathIntrinsic(Intrinsic* x);
void do_ArrayCopy(Intrinsic* x);
void do_CompareAndSwap(Intrinsic* x, ValueType* type);
void do_AttemptUpdate(Intrinsic* x);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);
......
...@@ -2919,7 +2919,6 @@ int java_lang_AssertionStatusDirectives::packages_offset;
int java_lang_AssertionStatusDirectives::packageEnabled_offset;
int java_lang_AssertionStatusDirectives::deflt_offset;
int java_nio_Buffer::_limit_offset;
int sun_misc_AtomicLongCSImpl::_value_offset;
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
int sun_reflect_ConstantPool::_cp_oop_offset;
int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
...@@ -2979,21 +2978,6 @@ void java_nio_Buffer::compute_offsets() {
compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
}
// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
int sun_misc_AtomicLongCSImpl::value_offset() {
assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");
return _value_offset;
}
void sun_misc_AtomicLongCSImpl::compute_offsets() {
klassOop k = SystemDictionary::AtomicLongCSImpl_klass();
// If this class is not present, its value field offset won't be referenced.
if (k != NULL) {
compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
}
}
void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;
...@@ -3098,7 +3082,6 @@ void JavaClasses::compute_offsets() {
sun_reflect_ConstantPool::compute_offsets();
sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
}
sun_misc_AtomicLongCSImpl::compute_offsets();
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
......
...@@ -1383,15 +1383,6 @@ class java_nio_Buffer: AllStatic {
static void compute_offsets();
};
class sun_misc_AtomicLongCSImpl: AllStatic {
private:
static int _value_offset;
public:
static int value_offset();
static void compute_offsets();
};
class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
private:
static int _owner_offset;
......
...@@ -170,9 +170,6 @@ class SymbolPropertyTable;
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
template(nio_Buffer_klass, java_nio_Buffer, Opt) \
\
/* If this class isn't present, it won't be referenced. */ \
template(AtomicLongCSImpl_klass, sun_misc_AtomicLongCSImpl, Opt) \
\
template(DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
\
template(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \
......
...@@ -722,15 +722,6 @@
/* java/lang/ref/Reference */ \
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
\
\
do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
/* (symbols get_name and void_long_signature defined above) */ \
\
do_intrinsic(_attemptUpdate, sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R) \
do_name( attemptUpdate_name, "attemptUpdate") \
do_signature(attemptUpdate_signature, "(JJ)Z") \
\
/* support for sun.misc.Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\
......
...@@ -147,7 +147,6 @@ macro(LoadNKlass)
macro(LoadL)
macro(LoadL_unaligned)
macro(LoadPLocked)
macro(LoadLLocked)
macro(LoadP)
macro(LoadN)
macro(LoadRange)
......
...@@ -2297,7 +2297,6 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
case Op_LoadL:
case Op_LoadL_unaligned:
case Op_LoadPLocked:
case Op_LoadLLocked:
case Op_LoadP:
case Op_LoadN:
case Op_LoadRange:
......
...@@ -192,8 +192,6 @@ class LibraryCallKit : public GraphKit {
void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
bool inline_native_clone(bool is_virtual);
bool inline_native_Reflection_getCallerClass();
bool inline_native_AtomicLong_get();
bool inline_native_AtomicLong_attemptUpdate();
bool is_method_invoke_or_aux_frame(JVMState* jvms);
// Helper function for inlining native object hash method
bool inline_native_hashcode(bool is_virtual, bool is_static);
...@@ -331,11 +329,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
// We do not intrinsify this. The optimizer does fine with it.
return NULL;
case vmIntrinsics::_get_AtomicLong:
case vmIntrinsics::_attemptUpdate:
if (!InlineAtomicLong) return NULL;
break;
case vmIntrinsics::_getCallerClass:
if (!UseNewReflection) return NULL;
if (!InlineReflectionGetCallerClass) return NULL;
...@@ -711,11 +704,6 @@ bool LibraryCallKit::try_to_inline() {
case vmIntrinsics::_reverseBytes_c:
return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
case vmIntrinsics::_get_AtomicLong:
return inline_native_AtomicLong_get();
case vmIntrinsics::_attemptUpdate:
return inline_native_AtomicLong_attemptUpdate();
case vmIntrinsics::_getCallerClass:
return inline_native_Reflection_getCallerClass();
...@@ -4006,113 +3994,6 @@ bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
return false;
}
static int value_field_offset = -1; // offset of the "value" field of AtomicLongCSImpl. This is needed by
// inline_native_AtomicLong_attemptUpdate() but it has no way of
// computing it since there is no lookup field by name function in the
// CI interface. This is computed and set by inline_native_AtomicLong_get().
// Using a static variable here is safe even if we have multiple compilation
// threads because the offset is constant. At worst the same offset will be
// computed and stored multiple times.
bool LibraryCallKit::inline_native_AtomicLong_get() {
// Restore the stack and pop off the argument
_sp+=1;
Node *obj = pop();
// get the offset of the "value" field. Since the CI interfaces
// does not provide a way to look up a field by name, we scan the bytecodes
// to get the field index. We expect the first 2 instructions of the method
// to be:
// 0 aload_0
// 1 getfield "value"
ciMethod* method = callee();
if (value_field_offset == -1)
{
ciField* value_field;
ciBytecodeStream iter(method);
Bytecodes::Code bc = iter.next();
if ((bc != Bytecodes::_aload_0) &&
((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
return false;
bc = iter.next();
if (bc != Bytecodes::_getfield)
return false;
bool ignore;
value_field = iter.get_field(ignore);
value_field_offset = value_field->offset_in_bytes();
}
// Null check without removing any arguments.
_sp++;
obj = do_null_check(obj, T_OBJECT);
_sp--;
// Check for locking null object
if (stopped()) return true;
Node *adr = basic_plus_adr(obj, obj, value_field_offset);
const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
int alias_idx = C->get_alias_index(adr_type);
Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
push_pair(result);
return true;
}
bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
// Restore the stack and pop off the arguments
_sp+=5;
Node *newVal = pop_pair();
Node *oldVal = pop_pair();
Node *obj = pop();
// we need the offset of the "value" field which was computed when
// inlining the get() method. Give up if we don't have it.
if (value_field_offset == -1)
return false;
// Null check without removing any arguments.
_sp+=5;
obj = do_null_check(obj, T_OBJECT);
_sp-=5;
// Check for locking null object
if (stopped()) return true;
Node *adr = basic_plus_adr(obj, obj, value_field_offset);
const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
int alias_idx = C->get_alias_index(adr_type);
Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
set_memory(store_proj, alias_idx);
Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
Node *result;
// CMove node is not used to be able fold a possible check code
// after attemptUpdate() call. This code could be transformed
// into CMove node by loop optimizations.
{
RegionNode *r = new (C, 3) RegionNode(3);
result = new (C, 3) PhiNode(r, TypeInt::BOOL);
Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
Node *iftrue = opt_iff(r, iff);
r->init_req(1, iftrue);
result->init_req(1, intcon(1));
result->init_req(2, intcon(0));
set_control(_gvn.transform(r));
record_for_igvn(r);
C->set_has_split_ifs(true); // Has chance for split-if optimization
}
push(_gvn.transform(result));
return true;
}
bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
// restore the arguments
_sp += arg_size();
......
...@@ -636,17 +636,6 @@ public:
virtual bool depends_only_on_test() const { return true; }
};
//------------------------------LoadLLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
LoadLLockedNode( Node *c, Node *mem, Node *adr )
: LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
virtual int Opcode() const;
virtual int store_Opcode() const { return Op_StoreLConditional; }
};
//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
......
...@@ -634,7 +634,7 @@ JNIEXPORT jobject JNICALL
JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);
/*
 * sun.misc.AtomicLong
 * java.util.concurrent.atomic.AtomicLong
 */
JNIEXPORT jboolean JNICALL
JVM_SupportsCX8(void);
......
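
The comment fix above points at the surviving consumer of JVM_SupportsCX8: java.util.concurrent.atomic probes it (via a native VMSupportsCS8() call) to learn whether the platform has an atomic 8-byte compare-and-exchange. A hedged sketch of the public API whose behavior that probe governs; the class and field names below are illustrative, not taken from the patch:

// Sketch using only public j.u.c.atomic API: where the CX8 probe reports no
// native 8-byte CAS, AtomicLongFieldUpdater falls back to an internally
// locked implementation; callers see the same compare-and-set semantics.
import java.util.concurrent.atomic.AtomicLongFieldUpdater;

public class Cx8Sketch {
    static class Counter {
        volatile long hits;  // updater targets must be volatile long fields
    }

    private static final AtomicLongFieldUpdater<Counter> HITS =
            AtomicLongFieldUpdater.newUpdater(Counter.class, "hits");

    public static void main(String[] args) {
        Counter c = new Counter();
        // Lock-free when JVM_SupportsCX8 reports true, locked otherwise.
        boolean swapped = HITS.compareAndSet(c, 0L, 1L);
        System.out.println("swapped=" + swapped + ", hits=" + c.hits);
    }
}
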
...@@ -631,9 +631,6 @@ class CommandLineFlags {
develop(bool, InlineClassNatives, true, \
"inline Class.isInstance, etc") \
\
develop(bool, InlineAtomicLong, true, \
"inline sun.misc.AtomicLong") \
\
develop(bool, InlineThreadNatives, true, \
"inline Thread.currentThread, etc") \
\
......
...@@ -1876,7 +1876,6 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_c2_type(StoreNNode, StoreNode) \
declare_c2_type(StoreCMNode, StoreNode) \
declare_c2_type(LoadPLockedNode, LoadPNode) \
declare_c2_type(LoadLLockedNode, LoadLNode) \
declare_c2_type(SCMemProjNode, ProjNode) \
declare_c2_type(LoadStoreNode, Node) \
declare_c2_type(StorePConditionalNode, LoadStoreNode) \
......