Commit bcaaa7a7 authored by G goetz

8042309: Some bugfixes for the ppc64 port.

Reviewed-by: kvn
Parent ba6464cf
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
@@ -403,7 +404,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_ov
  BLOCK_COMMENT("compute_interpreter_state {");
  // access_flags = method->access_flags();
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwa(access_flags, method_(access_flags));
  // parameter_count = method->constMethod->size_of_parameters();
@@ -1055,7 +1056,7 @@ address CppInterpreterGenerator::generate_native_entry(void) {
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));
  // We don't want to reload R19_method and access_flags after calls
@@ -1838,7 +1839,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
  // Interpreter state fields.
  const Register msg = R24_tmp4;
-  // MethodOop fields.
+  // Method fields.
  const Register parameter_count = R25_tmp5;
  const Register result_index = R26_tmp6;
@@ -2023,7 +2024,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
  __ add(R17_tos, R17_tos, parameter_count);
  // Result stub address array index
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwa(result_index, method_(result_index));
  __ li(msg, BytecodeInterpreter::method_resume);
@@ -2709,7 +2710,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
  __ ld(R3_ARG1, state_(_result._osr._osr_buf));
  __ mtctr(R12_scratch2);
-  // Load method oop, gc may move it during execution of osr'd method.
+  // Load method, gc may move it during execution of osr'd method.
  __ ld(R22_tmp2, state_(_method));
  // Load message 'call_method'.
  __ li(R23_tmp3, BytecodeInterpreter::call_method);
......
@@ -26,6 +26,8 @@
 #ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 #define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
+#include "code/codeCache.hpp"
 // Inline functions for ppc64 frames:
 // Find codeblob and set deopt_state.
......
@@ -26,7 +26,7 @@
 #ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 #define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
-#include "assembler_ppc.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"
 // This file specializes the assembler with interpreter-specific macros.
......
@@ -24,6 +24,7 @@
 */
 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
......
@@ -139,32 +139,16 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  // Signature is in R3_RET. Signature is callee saved.
  __ mr(signature, R3_RET);
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
  // Get the result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
  {
    Label L;
    // test if static
    // _access_flags._flags must be at offset 0.
    // TODO PPC port: requires change in shared code.
    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
    // _access_flags must be a 32 bit value.
    assert(sizeof(AccessFlags) == 4, "wrong size");
    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
......
@@ -32,7 +32,7 @@
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
  return (address) -1;
 }
@@ -57,12 +57,12 @@ address JNI_FastGetField::generate_fast_get_int_field() {
 }
 address JNI_FastGetField::generate_fast_get_long_field() {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
  return (address) -1;
 }
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  // e don't have fast jni accessors.
+  // We don't have fast jni accessors.
  return (address) -1;
 }
......
@@ -1135,6 +1135,7 @@ class CallStubImpl {
 public:
+  // Emit call stub, compiled java to interpreter.
  static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
  // Size of call trampoline stub.
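A trampoline stub extends the reach of a relative call on ppc64: the near `bl` targets the stub, which fetches the real 64-bit destination from the constant table (TOC) and branches through the count register. A plausible shape of the emitted sequence, as a hedged MacroAssembler sketch (register choice and the exact load form are illustrative, not the verified emitted code):

  // Load the 64-bit call target from the TOC slot written at patch time,
  // then branch indirectly through CTR.
  __ ld(R12_scratch2, destination_toc_offset, R2_TOC);
  __ mtctr(R12_scratch2);
  __ bctr();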
@@ -3663,6 +3664,8 @@ encode %{
  %}
  // Compound version of call dynamic
+  // Toc is only passed so that it can be used in ins_encode statement.
+  // In the code we have to use $constanttablebase.
  enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
@@ -3670,14 +3673,17 @@ encode %{
    Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
 #if 0
-    int vtable_index = this->_vtable_index;
    if (_vtable_index < 0) {
      // Must be invalid_vtable_index, not nonvirtual_vtable_index.
      assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
-      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());
+      // Virtual call relocation will point to ic load.
      address virtual_call_meta_addr = __ pc();
-      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // Load a clear inline cache.
+      AddressLiteral empty_ic((address) Universe::non_oop_word());
+      __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc);
      // CALL to fixup routine. Fixup routine uses ScopeDesc info
      // to determine who we intended to call.
      __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
@@ -3710,7 +3716,6 @@
             "Fix constant in ret_addr_offset()");
    }
 #endif
-    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
    Unimplemented(); // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
  %}
@@ -7064,7 +7069,7 @@ instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc s
    n1->_bottom_type = _bottom_type;
    decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
-    n2->add_req(n_region, n2);
+    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
@@ -7929,7 +7934,23 @@ instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 // positive longs and 0xF...F for negative ones.
-instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "SRADI $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);
@@ -9619,14 +9640,14 @@ instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*4);
  expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    sxtI_reg(src1s, src1); // ensure proper sign extention
-    sxtI_reg(src2s, src2); // ensure proper sign extention
-    subI_reg_reg(diff, src1s, src2s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src1s, src2s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(dst, diff);
+    signmask64I_regL(dst, diff);
  %}
 %}
@@ -11178,18 +11199,18 @@ instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*6);
  expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andI_reg_reg(doz, diff, sm); // <=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andL_reg_reg(doz, diff, sm); // <=0
+    addI_regL_regL(dst, doz, src1s);
  %}
 %}
@@ -11198,19 +11219,18 @@ instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*6);
  expand %{
-    immI_minus1 m1 %{ -1 %}
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andcI_reg_reg(doz, sm, m1, diff); // >=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andcL_reg_reg(doz, diff, sm); // >=0
+    addI_regL_regL(dst, doz, src1s);
  %}
 %}
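The rewritten expands above implement branch-free min/max: sign-extend both 32-bit inputs to 64 bits, subtract (the result can need 33 bits, hence the move to L-register temporaries), turn the sign bit into an all-ones/all-zero mask with `sradi #63`, and use the mask to select. The same arithmetic as a hedged C++ sketch (plain code, not ADL):

  #include <cstdint>

  // min(a, b) computed the way minI_reg_reg_Ex expands on ppc64.
  int32_t min_via_signmask(int32_t a, int32_t b) {
    int64_t src1s = a;              // convI2L_reg: proper sign extension
    int64_t src2s = b;
    int64_t diff  = src2s - src1s;  // subL_reg_reg: may not fit in 32 bits
    int64_t sm    = diff >> 63;     // signmask64L_regL: arithmetic shift, 0x0...0 or 0xF...F
    int64_t doz   = diff & sm;      // andL_reg_reg: diff if negative, else 0
    return (int32_t)(src1s + doz);  // addI_regL_regL: a + min(diff, 0)
  }

For max, the expand masks with the complement instead (andc), keeping diff only when it is non-negative, so the result is src1s + max(diff, 0).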
......
@@ -81,24 +81,18 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
 #if 0
 // Call special ClassCastException constructor taking object to cast
 // and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();
-  // Target class oop is in register R6_ARG4 by convention!
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
-  // Setup parameters.
  // Thread will be loaded to R3_ARG1.
-  __ load_const_optimized(R4_ARG2, (address) name);
-  __ mr(R5_ARG3, R17_tos);
-  // R6_ARG4 contains specified class.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
-#ifdef ASSERT
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
  // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
+  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
 }
 #endif
@@ -1535,14 +1529,32 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
  __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
  // Get out of the current method and re-execute the call that called us.
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
  __ restore_interpreter_state(R11_scratch1);
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+  __ mtlr(return_pc);
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
+#if INCLUDE_JVMTI
+  Label L_done;
+  __ lbz(R11_scratch1, 0, R14_bcp);
+  __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+  __ bne(CCR0, L_done);
+  // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+  // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+  __ ld(R4_ARG2, 0, R18_locals);
+  __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
+             R4_ARG2, R19_method, R14_bcp);
+  __ cmpdi(CCR0, R11_scratch1, 0);
+  __ beq(CCR0, L_done);
+  __ std(R11_scratch1, wordSize, R15_esp);
+  __ bind(L_done);
+#endif // INCLUDE_JVMTI
  __ dispatch_next(vtos);
 }
 // end of JVMTI PopFrame support
......
@@ -64,7 +64,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
  switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
@@ -104,7 +104,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
        __ bind(Ldone);
      }
      break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
@@ -259,17 +259,17 @@ void TemplateTable::fconst(int value) {
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
@@ -282,12 +282,12 @@ void TemplateTable::dconst(int value) {
  static double one = 1.0;
  switch (value) {
    case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
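The extra `true` argument added above plausibly asks load_const_optimized to materialize only the upper bits of the constant's address and hand back the remaining low 16 bits, which the following lfs/lfd then folds into its signed 16-bit displacement, saving an instruction per constant load. The underlying hi/lo split, as a hedged C++ sketch (helper name is illustrative, not a HotSpot API):

  #include <cstdint>

  // Split x into (hi, simm16) with x == hi + simm16, where simm16 is the
  // sign-extended low 16 bits; hi then has a zero low halfword and can be
  // materialized without the final low-bits instruction.
  static int16_t split_simm16(int64_t x, int64_t* hi) {
    int16_t lo = (int16_t)(x & 0xffff);  // wraps to negative for values >= 0x8000
    *hi = x - lo;
    return lo;
  }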
@@ -3728,9 +3728,9 @@ void TemplateTable::checkcast() {
  transition(atos, atos);
  Label Ldone, Lis_null, Lquicked, Lresolved;
-  Register Roffset = R5_ARG3,
+  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
-           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect this register.
+           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;
......
@@ -53,41 +53,41 @@ inline void Atomic::store_ptr(void* store_value, volatile void* dest) {
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
-/*
-  machine barrier instructions:
-
-  - sync            two-way memory barrier, aka fence
-  - lwsync          orders  Store|Store,
-                             Load|Store,
-                             Load|Load,
-                    but not Store|Load
-  - eieio           orders memory accesses for device memory (only)
-  - isync           invalidates speculatively executed instructions
-                    From the POWER ISA 2.06 documentation:
-                    "[...] an isync instruction prevents the execution of
-                    instructions following the isync until instructions
-                    preceding the isync have completed, [...]"
-                    From IBM's AIX assembler reference:
-                    "The isync [...] instructions causes the processor to
-                    refetch any instructions that might have been fetched
-                    prior to the isync instruction. The instruction isync
-                    causes the processor to wait for all previous instructions
-                    to complete. Then any instructions already fetched are
-                    discarded and instruction processing continues in the
-                    environment established by the previous instructions."
-
-  semantic barrier instructions:
-  (as defined in orderAccess.hpp)
-
-  - release         orders Store|Store,       (maps to lwsync)
-                           Load|Store
-  - acquire         orders Load|Store,        (maps to lwsync)
-                           Load|Load
-  - fence           orders Store|Store,       (maps to sync)
-                           Load|Store,
-                           Load|Load,
-                           Store|Load
-*/
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                   "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                   "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                          Load|Store
+// - acquire         orders Load|Store,        (maps to lwsync)
+//                          Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                          Load|Store,
+//                          Load|Load,
+//                          Store|Load
+//
 #define strasm_sync "\n sync \n"
 #define strasm_lwsync "\n lwsync \n"
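These string fragments get spliced into GCC inline assembly to give plain loads and stores the semantic barriers described above; for instance, a release store can be built by issuing lwsync before the store. A minimal hedged sketch using the macro as defined here (illustrative, not the file's actual accessors):

  // Release store: lwsync orders all prior stores (Store|Store) and
  // prior loads (Load|Store) before the store that follows.
  inline void release_store_int(volatile int* p, int v) {
    __asm__ __volatile__ (strasm_lwsync : : : "memory");
    *p = v;
  }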
......