Commit 20153ca5 authored by: M morris

8008407: remove SPARC V8 support

Summary: Removed most of the SPARC V8 instructions
Reviewed-by: kvn, twisti
Parent 58ac1de6
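The recurring shape of the change below: code paths guarded by VM_Version::v9_instructions_work() lose their V8 fallback branch, and the v9_only() guard becomes a no-op because V9 is now the baseline. A minimal, self-contained C++ sketch of that before/after pattern (not HotSpot source; the emit stand-ins and printed payloads are invented for illustration):

#include <cstdio>

namespace VM_Version {
  // Before the change this queried the CPU; afterwards V9 is simply assumed.
  inline bool v9_instructions_work() { return true; }
}

// Old shape: every V9 emitter was guarded and callers kept a V8 alternative.
void emit_branch_old() {
  if (VM_Version::v9_instructions_work()) {
    std::puts("bp  - V9 branch with prediction bits");
  } else {
    std::puts("br  - V8 branch without prediction");
  }
}

// New shape: the guard does nothing and only the V9 encoding remains.
inline void v9_only() { /* do nothing: V9 is the baseline */ }

void emit_branch_new() {
  v9_only();
  std::puts("bp  - V9 branch with prediction bits");
}

int main() {
  emit_branch_old();
  emit_branch_new();
  return 0;
}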
@@ -57,7 +57,6 @@ class Assembler : public AbstractAssembler {
     fbp_op2   = 5,
     br_op2    = 2,
     bp_op2    = 1,
-    cb_op2    = 7, // V8
     sethi_op2 = 4
   };
@@ -145,7 +144,6 @@ class Assembler : public AbstractAssembler {
     ldsh_op3     = 0x0a,
     ldx_op3      = 0x0b,
-    ldstub_op3   = 0x0d,
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,
@@ -163,15 +161,6 @@ class Assembler : public AbstractAssembler {
     prefetch_op3 = 0x2d,
-    ldc_op3      = 0x30,
-    ldcsr_op3    = 0x31,
-    lddc_op3     = 0x33,
-    stc_op3      = 0x34,
-    stcsr_op3    = 0x35,
-    stdcq_op3    = 0x36,
-    stdc_op3     = 0x37,
     casa_op3     = 0x3c,
     casxa_op3    = 0x3e,
@@ -574,17 +563,11 @@ class Assembler : public AbstractAssembler {
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
   // instruction only in v9
-  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
+  static void v9_only() { } // do nothing
-  // instruction only in v8
-  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
   // instruction deprecated in v9
   static void v9_dep() { } // do nothing for now
-  // some float instructions only exist for single prec. on v8
-  static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }
   // v8 has no CC field
   static void v8_no_cc(CC cc) { if (cc) v9_only(); }
@@ -730,11 +713,6 @@ public:
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
-  // pp 121 (V8)
-  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
-  inline void cb( Condition c, bool a, Label& L );
   // pp 149
   inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@ public:
   // pp 157
-  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
-  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
+  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
+  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
   // pp 159
@@ -794,21 +772,11 @@ public:
   // pp 162
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
-  // on v8 to do negation of single, double and quad precision floats.
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
-  // on v8 to do abs operation on single/double/quad precision floats.
-  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
+  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
+  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
+  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
   // pp 163
@@ -839,11 +807,6 @@ public:
   void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
   void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
-  // pp 149 (v8)
-  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
   // pp 170
   void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@ public:
   inline void ldxfsr( Register s1, Register s2 );
   inline void ldxfsr( Register s1, int simm13a);
-  // pp 94 (v8)
-  inline void ldc(   Register s1, Register s2, int crd );
-  inline void ldc(   Register s1, int simm13a, int crd);
-  inline void lddc(  Register s1, Register s2, int crd );
-  inline void lddc(  Register s1, int simm13a, int crd);
-  inline void ldcsr( Register s1, Register s2, int crd );
-  inline void ldcsr( Register s1, int simm13a, int crd);
   // 173
   void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@ public:
   void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void ldxa(  Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void ldxa(  Register s1, int simm13a,         Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void ldda(  Register s1, Register s2, int ia, Register d ) { v9_dep();  emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldda(  Register s1, int simm13a,         Register d ) { v9_dep();  emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  // pp 179
-  inline void ldstub( Register s1, Register s2, Register d );
-  inline void ldstub( Register s1, int simm13a, Register d);
-  // pp 180
-  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldstuba( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   // pp 181
@@ -992,11 +933,6 @@ public:
   void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  // pp 199
-  void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
-  void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   // pp 201
   void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@ public:
   void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void stda( Register d, Register s1, int simm13a         ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  // pp 97 (v8)
-  inline void stc(   int crd, Register s1, Register s2 );
-  inline void stc(   int crd, Register s1, int simm13a);
-  inline void stdc(  int crd, Register s1, Register s2 );
-  inline void stdc(  int crd, Register s1, int simm13a);
-  inline void stcsr( int crd, Register s1, Register s2 );
-  inline void stcsr( int crd, Register s1, int simm13a);
-  inline void stdcq( int crd, Register s1, Register s2 );
-  inline void stdcq( int crd, Register s1, int simm13a);
   // pp 230
   void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@ public:
   void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
   void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   // pp 235
   void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
   void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   // pp 237
-  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+  void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
+  void trap( Condition c, CC cc, Register s1, int trapa   ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   // simple uncond. trap
   void trap( int trapa ) { trap( always, icc, G0, trapa ); }
...
@@ -63,9 +63,6 @@ inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L))
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
@@ -88,18 +85,9 @@ inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHol
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
-inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();  emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();  emit_data(  op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data(  op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data(  op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -119,9 +107,6 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data(  op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data(  op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::rett( Register s1, Register s2                         ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data(  op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@@ -132,8 +117,6 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data(  op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();  emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();  emit_data(  op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data(  op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -152,17 +135,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
 inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data(  op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-// v8 p 99
-inline void Assembler::stc(   int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stc(   int crd, Register s1, int simm13a) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdc(  int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdc(  int crd, Register s1, int simm13a) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data(  op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 // pp 231
 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
...
@@ -597,13 +597,6 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
   __ sra(Rdividend, 31, Rscratch);
   __ wry(Rscratch);
-  if (!VM_Version::v9_instructions_work()) {
-    // v9 doesn't require these nops
-    __ nop();
-    __ nop();
-    __ nop();
-    __ nop();
-  }
   add_debug_info_for_div0_here(op->info());
@@ -652,10 +645,6 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual    : Assembler::f_lessOrEqual);    break;
       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
       default :                          ShouldNotReachHere();
    };
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
-    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
@@ -725,9 +714,6 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
       Label L;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
       __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
         case lir_add: __ add  (lreg, rreg, res); break;
         case lir_sub: __ sub  (lreg, rreg, res); break;
-        case lir_mul: __ mult (lreg, rreg, res); break;
+        case lir_mul: __ mulx (lreg, rreg, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -1924,7 +1910,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
         case lir_add: __ add  (lreg, simm13, res); break;
         case lir_sub: __ sub  (lreg, simm13, res); break;
-        case lir_mul: __ mult (lreg, simm13, res); break;
+        case lir_mul: __ mulx (lreg, simm13, res); break;
         default: ShouldNotReachHere();
       }
     } else {
@@ -1936,7 +1922,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
         case lir_add: __ add  (lreg, (int)con, res); break;
         case lir_sub: __ sub  (lreg, (int)con, res); break;
-        case lir_mul: __ mult (lreg, (int)con, res); break;
+        case lir_mul: __ mulx (lreg, (int)con, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -3234,7 +3220,6 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
   Register base = mem_addr->base()->as_register();
   if (src->is_register() && dest->is_address()) {
     // G4 is high half, G5 is low half
-    if (VM_Version::v9_instructions_work()) {
     // clear the top bits of G5, and scale up G4
     __ srl (src->as_register_lo(), 0, G5);
     __ sllx(src->as_register_hi(), 32, G4);
@@ -3246,19 +3231,8 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
     } else {
       __ stx(G4, base, idx);
     }
-    } else {
-      __ mov (src->as_register_hi(), G4);
-      __ mov (src->as_register_lo(), G5);
-      null_check_offset = __ offset();
-      if (idx == noreg) {
-        __ std(G4, base, disp);
-      } else {
-        __ std(G4, base, idx);
-      }
-    }
   } else if (src->is_address() && dest->is_register()) {
     null_check_offset = __ offset();
-    if (VM_Version::v9_instructions_work()) {
     if (idx == noreg) {
       __ ldx(base, disp, G5);
     } else {
@@ -3266,16 +3240,6 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
     }
     __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
     __ mov (G5, dest->as_register_lo());     // copy low half into lo
-    } else {
-      if (idx == noreg) {
-        __ ldd(base, disp, G4);
-      } else {
-        __ ldd(base, idx, G4);
-      }
-      // G4 is high half, G5 is low half
-      __ mov (G4, dest->as_register_hi());
-      __ mov (G5, dest->as_register_lo());
-    }
   } else {
     Unimplemented();
   }
...
@@ -108,7 +108,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
   cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
   // Check if it is still a light weight lock, this is is true if we see
   // the stack address of the basicLock in the markOop of the object
-  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   cmp(Rbox, Rmark);
   brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(
     sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
     initialize_body(t1, t2);
 #ifndef _LP64
-  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
+  } else if (con_size_in_bytes < threshold * 2) {
     // on v9 we can do double word stores to fill twice as much space.
     assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
     assert(con_size_in_bytes % 8 == 0, "double word aligned");
...
@@ -30,5 +30,4 @@
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
-  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
 }
@@ -30,8 +30,7 @@
   }
   static const char* pd_cpu_opts() {
-    return (VM_Version::v9_instructions_work()?
-            (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
+    return "v9only";
   }
 #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
@@ -110,8 +110,5 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS
                                                                   \
   product(uintx, ArraycopyDstPrefetchDistance, 0,                 \
           "Distance to prefetch destination array in arracopy")   \
-                                                                  \
-  develop(intx, V8AtomicOperationUnderLockSpinCount, 50,          \
-          "Number of times to spin wait on a v8 atomic operation lock") \
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
@@ -1210,8 +1210,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
     // compare and exchange object_addr, markOop | 1, stack address of basicLock
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
     // if the compare and exchange succeeded we are done (we saw an unlocked object)
     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
       // we expect to see the stack address of the basicLock in case the
       // lock is still a light weight lock (lock_reg)
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+      cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
       cmp(lock_reg, displaced_header_reg);
       brx(Assembler::equal, true, Assembler::pn, done);
       delayed()->st_ptr(G0, lockobj_addr);  // free entry
...
@@ -1056,13 +1056,6 @@ public:
   void breakpoint_trap();
   void breakpoint_trap(Condition c, CC cc);
-  void flush_windows_trap();
-  void clean_windows_trap();
-  void get_psr_trap();
-  void set_psr_trap();
-  // V8/V9 flush_windows
-  void flush_windows();
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@ public:
   void enter();
   void leave();
-  // V8/V9 integer multiply
-  void mult(Register s1, Register s2, Register d);
-  void mult(Register s1, int simm13a, Register d);
-  // V8/V9 read and write of condition codes.
-  void read_ccr(Register d);
-  void write_ccr(Register s);
   // Manipulation of C++ bools
   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
@@ -1162,21 +1147,6 @@ public:
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
- private:
-  // For V8
-  void read_ccr_trap(Register ccr_save);
-  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-#ifdef ASSERT
-  // For V8 debugging. Uses V8 instruction sequence and checks
-  // result with V9 insturctions rdccr and wrccr.
-  // Uses Gscatch and Gscatch2
-  void read_ccr_v8_assert(Register ccr_save);
-  void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
- public:
   // Write to card table for - register is destroyed afterwards.
   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
@@ -1314,20 +1284,9 @@ public:
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
   void save_all_globals_into_locals();
   void restore_globals_from_locals();
-  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
...
@@ -229,10 +229,7 @@ inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Registe
 // Use the right branch for the platform
 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp(c, a, icc, p, d, rt);
-  else
-    Assembler::br(c, a, d, rt);
+  Assembler::bp(c, a, icc, p, d, rt);
 }
 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@ inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L
 }
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    fbp(c, a, fcc0, p, d, rt);
-  else
-    Assembler::fb(c, a, d, rt);
+  fbp(c, a, fcc0, p, d, rt);
 }
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@@ -334,7 +328,7 @@ inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder co
 // prefetch instruction
 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp( never, true, xcc, pt, d, rt );
+  Assembler::bp( never, true, xcc, pt, d, rt );
 }
 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@@ -344,15 +338,7 @@ inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
 // returns delta from gotten pc to addr after
 inline int MacroAssembler::get_pc( Register d ) {
   int x = offset();
-  if (VM_Version::v9_instructions_work())
-    rdpc(d);
-  else {
-    Label lbl;
-    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
-    if (d == O7)  delayed()->nop();
-    else          delayed()->mov(O7, d);
-    bind(lbl);
-  }
+  rdpc(d);
   return offset() - x;
 }
@@ -646,41 +632,26 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, Fl
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if( !os::is_MP() ) return false;  // Not needed on single CPU
-  if( VM_Version::v9_instructions_work() ) {
+  if (!os::is_MP())
+    return false;  // Not needed on single CPU
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   return (effective_mask != 0);
-  } else {
-    return true;
-  }
 }
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   // Uniprocessors do not need memory barriers
-  if (!os::is_MP()) return;
+  if (!os::is_MP())
+    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
-  if( VM_Version::v9_instructions_work() ) {
   // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
   // of the mmask subfield of const7a that does anything that isn't done
   // implicitly is StoreLoad.
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    if ( effective_mask != 0 ) {
-      Assembler::membar( effective_mask );
-    }
-  } else {
-    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
-    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
-    // which guarantees that all stores behave as if an stbar were issued just after
-    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
-    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
-    // it can't be specified by stbar, nor have I come up with a way to simulate it.
-    //
-    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
-    // space.  Put one here to be on the safe side.
-    Assembler::ldstub(SP, 0, G0);
+  if (effective_mask != 0) {
+    Assembler::membar(effective_mask);
   }
 }
...
@@ -162,7 +162,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
@@ -181,7 +181,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }
@@ -933,11 +933,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
   int code_size = 1 * BytesPerInstWord;
   CodeBuffer cb(verified_entry, code_size + 1);
   MacroAssembler* a = new MacroAssembler(&cb);
-  if (VM_Version::v9_instructions_work()) {
   a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  } else {
-    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  }
   ICache::invalidate_range(verified_entry, code_size);
 }
@@ -1024,7 +1020,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
@@ -1043,6 +1039,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }
@@ -70,8 +70,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
   bool is_zombie() {
     int x = long_at(0);
     return is_op3(x,
-                  VM_Version::v9_instructions_work() ?
-                  Assembler::ldsw_op3 : Assembler::lduw_op3,
+                  Assembler::ldsw_op3,
                   Assembler::ldst_op)
       && Assembler::inv_rs1(x) == G0
       && Assembler::inv_rd(x) == O7;
...
@@ -249,12 +249,10 @@ class FloatRegisterImpl: public AbstractRegisterImpl {
     case D:
       assert(c < 64  &&  (c & 1) == 0, "bad double float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1e) | ((c & 0x20) >> 5);
     case Q:
       assert(c < 64  &&  (c & 3) == 0, "bad quad float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1c) | ((c & 0x20) >> 5);
     }
     ShouldNotReachHere();
...
@@ -2459,7 +2459,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Finally just about ready to make the JNI call
-  __ flush_windows();
+  __ flushw();
   if (inner_frame_created) {
     __ restore();
   } else {
...
@@ -2778,10 +2778,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
     Register Rold = reg_to_register_object($old$$reg);
     Register Rnew = reg_to_register_object($new$$reg);
-    // casx_under_lock picks 1 of 3 encodings:
-    // For 32-bit pointers you get a 32-bit CAS
-    // For 64-bit pointers you get a 64-bit CASX
-    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
+    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
     __ cmp( Rold, Rnew );
   %}
@@ -3067,7 +3064,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
     AddressLiteral last_rethrow_addrlit(&last_rethrow);
     __ sethi(last_rethrow_addrlit, L1);
     Address addr(L1, last_rethrow_addrlit.low10());
-    __ get_pc(L2);
+    __ rdpc(L2);
     __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
     __ st_ptr(L2, addr);
     __ restore();
...
@@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {
     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
     address start = __ pc();
-    __ flush_windows();
+    __ flushw();
     __ retl(false);
     __ delayed()->add( FP, STACK_BIAS, O0 );
     // The returned value must be a stack pointer whose register save area
@@ -575,67 +575,9 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
-  // Helper functions for v8 atomic operations.
-  //
-  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
-    if (mark_oop_reg == noreg) {
-      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-    } else {
-      assert(scratch_reg != noreg, "just checking");
-      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
-      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
-    }
-  }
-  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
-    __ set(StubRoutines::Sparc::locked, lock_reg);
-    // Initialize yield counter
-    __ mov(G0,yield_reg);
-    __ BIND(retry);
-    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
-    // This code can only be called from inside the VM, this
-    // stub is only invoked from Atomic::add().  We do not
-    // want to use call_VM, because _last_java_sp and such
-    // must already be set.
-    //
-    // Save the regs and make space for a C call
-    __ save(SP, -96, SP);
-    __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
-    __ delayed()->nop();
-    __ restore_globals_from_locals();
-    __ restore();
-    // reset the counter
-    __ mov(G0,yield_reg);
-    __ BIND(dontyield);
-    // try to get lock
-    __ swap(lock_ptr_reg, 0, lock_reg);
-    // did we get the lock?
-    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    __ br(Assembler::notEqual, true, Assembler::pn, retry);
-    __ delayed()->add(yield_reg,1,yield_reg);
-    // yes, got lock.  do the operation here.
-  }
-  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    __ st(lock_reg, lock_ptr_reg, 0); // unlock
-  }
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -656,33 +598,14 @@ class StubGenerator: public StubCodeGenerator {
       __ mov(O0, O3);   // scratch copy of exchange value
       __ ld(O1, 0, O2); // observe the previous value
       // try to replace O2 with O3
-      __ cas_under_lock(O1, O2, O3,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+      __ cas(O1, O2, O3);
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
     } else {
-      if (VM_Version::v9_instructions_work()) {
       __ retl(false);
       __ delayed()->swap(O1, 0, O0);
-      } else {
-        const Register& lock_reg = O2;
-        const Register& lock_ptr_reg = O3;
-        const Register& yield_reg = O4;
-        Label retry;
-        Label dontyield;
-        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        // got the lock, do the swap
-        __ swap(O1, 0, O0);
-        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        __ retl(false);
-        __ delayed()->nop();
-      }
     }
     return start;
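For readers following the stub: the multi-processor path above is a plain CAS retry loop. A rough C++ rendering of the control flow, assuming illustrative names (this is not HotSpot code):

    #include <atomic>

    // Rough equivalent of the CAS retry loop in the multi-processor path:
    // observe the current value, try to swap in the exchange value, retry
    // until the CAS succeeds, and hand back the previous value.
    int atomic_xchg_sketch(int exchange_value, std::atomic<int>& dest) {
      int observed = dest.load();                          // __ ld(O1, 0, O2)
      while (!dest.compare_exchange_weak(observed, exchange_value)) {
        // 'observed' is refreshed with the latest value on failure; retry
      }
      return observed;                                     // reported in O0
    }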
@@ -691,7 +614,7 @@ class StubGenerator: public StubCodeGenerator {
   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -701,15 +624,12 @@ class StubGenerator: public StubCodeGenerator {
   //
   //     O0: the value previously stored in dest
   //
-  //     Overwrites (v8): O3,O4,O5
-  //
   address generate_atomic_cmpxchg() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
     address start = __ pc();
     // cmpxchg(dest, compare_value, exchange_value)
-    __ cas_under_lock(O1, O2, O0,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+    __ cas(O1, O2, O0);
     __ retl(false);
     __ delayed()->nop();
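The single `cas` that replaces `cas_under_lock` implements the contract described in the comment block: store exchange_value only if dest still holds compare_value, and report the previous contents either way. A minimal sketch of that contract, names illustrative:

    #include <atomic>

    // The compare-and-swap contract the stub now leaves to one instruction:
    // if *dest still equals compare_value, store exchange_value there;
    // in either case return what *dest held before the operation.
    int cmpxchg_sketch(int exchange_value, std::atomic<int>& dest, int compare_value) {
      int expected = compare_value;
      dest.compare_exchange_strong(expected, exchange_value);
      return expected;   // previous contents (== compare_value on success)
    }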
@@ -718,7 +638,7 @@ class StubGenerator: public StubCodeGenerator {
   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O1:O0
   //      dest:           O2
@@ -728,17 +648,12 @@ class StubGenerator: public StubCodeGenerator {
   //
   //     O1:O0: the value previously stored in dest
   //
-  //     This only works on V9, on V8 we don't generate any
-  //     code and just return NULL.
-  //
   //     Overwrites: G1,G2,G3
   //
   address generate_atomic_cmpxchg_long() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
     address start = __ pc();
-    if (!VM_Version::supports_cx8())
-      return NULL;;
     __ sllx(O0, 32, O0);
     __ srl(O1, 0, O1);
     __ or3(O0,O1,O0);   // O0 holds 64-bit value from compare_value
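In the 32-bit calling convention the jlong arguments arrive split across register pairs, so the `sllx`/`srl`/`or3` sequence rebuilds one 64-bit value. The equivalent C++ expression, as an illustration only:

    #include <cstdint>

    // sllx(O0, 32) positions the high half, srl(O1, 0) zero-extends the low
    // half, and or3 merges them into a single 64-bit value:
    uint64_t pack_halves(uint32_t hi, uint32_t lo) {
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }
    // e.g. pack_halves(0x00000001, 0x00000002) == 0x0000000100000002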
@@ -756,7 +671,7 @@ class StubGenerator: public StubCodeGenerator {
   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      add_value: O0 (e.g., +1 or -1)
   //      dest:      O1
@@ -765,15 +680,13 @@ class StubGenerator: public StubCodeGenerator {
   //
   //     O0: the new value stored in dest
   //
-  //     Overwrites (v9): O3
-  //     Overwrites (v8): O3,O4,O5
+  //     Overwrites: O3
   //
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
     __ BIND(_atomic_add_stub);
-    if (VM_Version::v9_instructions_work()) {
     Label(retry);
     __ BIND(retry);
@@ -783,29 +696,6 @@ class StubGenerator: public StubCodeGenerator {
     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
     __ retl(false);
     __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
-    } else {
-      const Register& lock_reg = O2;
-      const Register& lock_ptr_reg = O3;
-      const Register& value_reg = O4;
-      const Register& yield_reg = O5;
-      Label(retry);
-      Label(dontyield);
-      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      // got lock, do the increment
-      __ ld(O1, 0, value_reg);
-      __ add(O0, value_reg, value_reg);
-      __ st(value_reg, O1, 0);
-      // %%% only for RMO and PSO
-      __ membar(Assembler::StoreStore);
-      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      __ retl(false);
-      __ delayed()->mov(value_reg, O0);
-    }
     return start;
   }
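What remains of `generate_atomic_add` is the standard load/CAS/retry increment. A rough C++ sketch of the same loop, assuming illustrative names:

    #include <atomic>

    // Load the current value, try to CAS in (value + add_value), retry until
    // the CAS sticks, and return the new value, as the stub returns it in O0.
    int atomic_add_sketch(int add_value, std::atomic<int>& dest) {
      int observed = dest.load();
      while (!dest.compare_exchange_weak(observed, observed + add_value)) {
        // 'observed' now holds the latest value; recompute and retry
      }
      return observed + add_value;
    }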
@@ -841,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
       __ mov(G3, L3);
       __ mov(G4, L4);
       __ mov(G5, L5);
-      for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+      for (i = 0; i < 64; i += 2) {
         __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
       }
@@ -855,7 +745,7 @@ class StubGenerator: public StubCodeGenerator {
       __ mov(L3, G3);
       __ mov(L4, G4);
       __ mov(L5, G5);
-      for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+      for (i = 0; i < 64; i += 2) {
         __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
       }
......
@@ -52,7 +52,3 @@ address StubRoutines::Sparc::_stop_subroutine_entry = NULL;
 address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
 address StubRoutines::Sparc::_partial_subtype_check = NULL;
-int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
-int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
@@ -47,46 +47,14 @@ enum /* platform_dependent_constants */ {
 class Sparc {
  friend class StubGenerator;
- public:
-  enum { nof_instance_allocators = 10 };
-  // allocator lock values
-  enum {
-    unlocked = 0,
-    locked = 1
-  };
-  enum {
-    v8_oop_lock_ignore_bits = 2,
-    v8_oop_lock_bits = 4,
-    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
-    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
-    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
-  };
-  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
  private:
   static address _test_stop_entry;
   static address _stop_subroutine_entry;
   static address _flush_callers_register_windows_entry;
-  static int _atomic_memory_operation_lock;
   static address _partial_subtype_check;
  public:
-  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
-  // %%% or atomic_increment -- should probably use more locks for more
-  // %%% scalability-- for instance one for each eden space or group of
-  // address of the lock for atomic_compare_and_exchange
-  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
-  // accessor and mutator for _atomic_memory_operation_lock
-  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
-  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
   // test assembler stop routine by setting registers
   static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }
......
@@ -1054,7 +1054,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // flush the windows now. We don't care about the current (protection) frame
   // only the outer frames
-  __ flush_windows();
+  __ flushw();
   // mark windows as flushed
   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
......
@@ -1338,14 +1338,13 @@ void TemplateTable::lneg() {
 void TemplateTable::fneg() {
   transition(ftos, ftos);
-  __ fneg(FloatRegisterImpl::S, Ftos_f);
+  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
 }
 void TemplateTable::dneg() {
   transition(dtos, dtos);
-  // v8 has fnegd if source and dest are the same
-  __ fneg(FloatRegisterImpl::D, Ftos_f);
+  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
 }
@@ -1470,31 +1469,17 @@ void TemplateTable::convert() {
     __ st_long(Otos_l, __ d_tmp);
     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
-    if (VM_Version::v9_instructions_work()) {
     if (bytecode() == Bytecodes::_l2f) {
       __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
     } else {
       __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
     }
-    } else {
-      __ call_VM_leaf(
-        Lscratch,
-        bytecode() == Bytecodes::_l2f
-          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
-          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
-      );
-    }
     break;
   case Bytecodes::_f2i: {
     Label isNaN;
     // result must be 0 if value is NaN; test by comparing value to itself
     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
-    // According to the v8 manual, you have to have a non-fp instruction
-    // between fcmp and fb.
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
-    }
     __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
     __ delayed()->clr(Otos_i); // NaN
     __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
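The deleted nop was only needed because V8 required a non-FP instruction between `fcmp` and `fb`; the NaN handling itself is unchanged: a value is NaN exactly when it compares unordered with itself, and f2i of NaN must be 0. A sketch of that check alone, with an illustrative function name:

    // A value is NaN exactly when it is unordered with itself, which is what
    // the fcmp/fb pair tests; the bytecode requires f2i(NaN) == 0.
    int f2i_nan_sketch(float f) {
      if (f != f) {                  // unordered self-compare: NaN
        return 0;
      }
      return static_cast<int>(f);    // clamping of out-of-range values omitted
    }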
@@ -1537,16 +1522,7 @@ void TemplateTable::convert() {
     break;
   case Bytecodes::_d2f:
-    if (VM_Version::v9_instructions_work()) {
     __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
-    }
-    else {
-      // must uncache tos
-      __ push_d();
-      __ pop_i(O0);
-      __ pop_i(O1);
-      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
-    }
     break;
   default: ShouldNotReachHere();
@@ -1956,17 +1932,8 @@ void TemplateTable::fast_binaryswitch() {
     __ ld( Rarray, Rscratch, Rscratch );
     // (Rscratch is already in the native byte-ordering.)
     __ cmp( Rkey, Rscratch );
-    if ( VM_Version::v9_instructions_work() ) {
     __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
     __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
-    }
-    else {
-      Label end_of_if;
-      __ br( Assembler::less, true, Assembler::pt, end_of_if );
-      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
-      __ mov( Rh, Ri );            // else i = h
-      __ bind(end_of_if);          // }
-    }
     // while (i+1 < j)
     __ bind( entry );
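With the V8 fallback gone, the two `movcc` instructions are the whole branch-free narrowing step of the binary search described in the comments. Roughly, with illustrative names:

    // One narrowing step of the lookupswitch binary search, as the comments
    // describe it; the conditional moves make this step branch-free.
    static void binary_search_step(const int* array, int key, int& i, int& j) {
      int h = (i + j) >> 1;          // midpoint
      if (key < array[h]) {
        j = h;                       // movcc(less, ..., Rh, Rj)
      } else {
        i = h;                       // movcc(greaterEqual, ..., Rh, Ri)
      }
    }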
@@ -3418,9 +3385,7 @@ void TemplateTable::_new() {
     // has been allocated.
     __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
-    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
-                       VM_Version::v9_instructions_work() ? NULL :
-                       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
     // if someone beat us on the allocation, try again, otherwise continue
     __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
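The fast path above is bump-the-pointer allocation guarded by a CAS on the eden top; `cas_ptr` now does unconditionally what `casx_under_lock` selected at run time. A rough sketch of the scheme, with illustrative types and names and the slow path elided:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Bump-pointer allocation guarded by a CAS on the top pointer: compute
    // the new top, CAS it in, and retry if another thread won the race.
    void* allocate_sketch(std::atomic<uintptr_t>& top, std::size_t size) {
      for (;;) {
        uintptr_t old_top = top.load();
        uintptr_t new_top = old_top + size;
        if (top.compare_exchange_weak(old_top, new_top)) {
          return reinterpret_cast<void*>(old_top);   // object starts at old top
        }
        // someone beat us to the allocation; try again
      }
    }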
@@ -3701,14 +3666,7 @@ void TemplateTable::monitorenter() {
     __ verify_oop(O4); // verify each monitor's oop
     __ tst(O4); // is this entry unused?
-    if (VM_Version::v9_instructions_work())
     __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
-    else {
-      Label L;
-      __ br( Assembler::zero, true, Assembler::pn, L );
-      __ delayed()->mov(O3, O1); // rememeber this one if match
-      __ bind(L);
-    }
     __ cmp(O4, O0); // check if current entry is for same object
     __ brx( Assembler::equal, false, Assembler::pn, exit );
......
@@ -75,23 +75,14 @@ void VM_Version::initialize() {
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
   }
-  if (has_v9()) {
+  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
   assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
   if (ArraycopySrcPrefetchDistance >= 4096)
     ArraycopySrcPrefetchDistance = 4064;
   assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
   if (ArraycopyDstPrefetchDistance >= 4096)
     ArraycopyDstPrefetchDistance = 4064;
-  } else {
-    if (ArraycopySrcPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
-    }
-    if (ArraycopyDstPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
-    }
-  }
   UseSSE = 0; // Only on x86 and x64
......
@@ -177,10 +177,6 @@ public:
     return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
   }
-  // Legacy
-  static bool v8_instructions_work() { return has_v8() && !has_v9(); }
-  static bool v9_instructions_work() { return has_v9(); }
   // Assembler testing
   static void allow_all();
   static void revert();
......
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include <asm-sparc/traps.h>
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // No implementation
-  breakpoint_trap();
-}
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // No implementation
-  breakpoint_trap();
-}
-void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
-void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
-// Use software breakpoint trap until we figure out how to do this on Linux
-void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); }
-void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); }
@@ -169,7 +169,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
       : "memory");
   return rv;
 #else
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
......
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include <sys/trap.h>          // For trap numbers
-#include <v9/sys/psr_compat.h> // For V8 compatibility
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // Execute a trap to get the PSR, mask and shift
-  // to get the condition codes.
-  get_psr_trap();
-  nop();
-  set(PSR_ICC, ccr_save);
-  and3(O0, ccr_save, ccr_save);
-  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
-}
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // Execute a trap to get the PSR, shift back
-  // the condition codes, mask the condition codes
-  // back into and PSR and trap to write back the
-  // PSR.
-  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
-  get_psr_trap();
-  nop();
-  set(~PSR_ICC, scratch1);
-  and3(O0, scratch1, O0);
-  or3(O0, scratch2, O0);
-  set_psr_trap();
-  nop();
-}
-void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
-void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
-void MacroAssembler::get_psr_trap() { trap(ST_GETPSR); }
-void MacroAssembler::set_psr_trap() { trap(ST_SETPSR); }
@@ -60,21 +60,10 @@ inline jlong Atomic::load(volatile jlong* src) { return *src; }
 #else
-extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
 extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-#ifdef COMPILER2
-  // Compiler2 does not support v8, it is used only for v9.
   _Atomic_move_long_v9(src, dst);
-#else
-  // The branch is cheaper then emulated LDD.
-  if (VM_Version::v9_instructions_work()) {
-    _Atomic_move_long_v9(src, dst);
-  } else {
-    _Atomic_move_long_v8(src, dst);
-  }
-#endif
 }
 inline jlong Atomic::load(volatile jlong* src) {
@@ -209,7 +198,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
       : "memory");
   return rv;
 #else //_LP64
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
@@ -318,7 +306,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
   // Return 64 bit value in %o0
   return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
 #else // _LP64
-  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   // Return 64 bit value in %o0,%o1 by hand
   return _Atomic_casl(exchange_value, dest, compare_value);
 #endif // _LP64
......
@@ -152,23 +152,6 @@
   .nonvolatile
   .end
-// Support for jlong Atomic::load and Atomic::store on v8.
-//
-// void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
-//
-// Arguments:
-//      src:  O0
-//      dest: O1
-//
-// Overwrites O2 and O3
-  .inline _Atomic_move_long_v8,2
-  .volatile
-  ldd [%o0], %o2
-  std %o2, [%o1]
-  .nonvolatile
-  .end
 // Support for jlong Atomic::load and Atomic::store on v9.
 //
 // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
......
@@ -1885,21 +1885,6 @@ bool Arguments::check_vm_args_consistency() {
   // Note: Needs platform-dependent factoring.
   bool status = true;
-#if ( (defined(COMPILER2) && defined(SPARC)))
-  // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
-  // on sparc doesn't require generation of a stub as is the case on, e.g.,
-  // x86. Normally, VM_Version_init must be called from init_globals in
-  // init.cpp, which is called by the initial java thread *after* arguments
-  // have been parsed. VM_Version_init gets called twice on sparc.
-  extern void VM_Version_init();
-  VM_Version_init();
-  if (!VM_Version::has_v9()) {
-    jio_fprintf(defaultStream::error_stream(),
-                "V8 Machine detected, Server requires V9\n");
-    status = false;
-  }
-#endif /* COMPILER2 && SPARC */
   // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
   // builds so the cost of stack banging can be measured.
 #if (defined(PRODUCT) && defined(SOLARIS))
......