提交 3761035f 编写于 作者: A aurel32

alpha: directly access ir registers

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5151 c046a42c-6fe2-441c-8c8c-71466251a162
上级 496cb5b9
......@@ -20,11 +20,6 @@
/* Optimized constant loads */
#if REG < 3
/* Dyngen micro-op: clear temporary T<REG> to zero.
   Micro-op bodies must stay minimal and end with RETURN() so the
   dyngen code extractor can lift the generated machine code. */
void OPPROTO glue(op_reset_T, REG) (void)
{
glue(T, REG) = 0;
RETURN();
}
#if !defined(HOST_SPARC) && !defined(HOST_SPARC64)
void OPPROTO glue(op_reset_FT, REG) (void)
......@@ -40,87 +35,10 @@ void OPPROTO glue(op_reset_FT, REG) (void)
}
#endif
/* XXX: This can be great on most RISC machines */
#if !defined(__i386__) && !defined(__x86_64__)
/* Dyngen micro-op: T<REG> = sign-extended 16-bit immediate PARAM(1). */
void OPPROTO glue(op_set_s16_T, REG) (void)
{
glue(T, REG) = (int16_t)PARAM(1);
RETURN();
}
/* Dyngen micro-op: T<REG> = zero-extended 16-bit immediate PARAM(1). */
void OPPROTO glue(op_set_u16_T, REG) (void)
{
glue(T, REG) = (uint16_t)PARAM(1);
RETURN();
}
#endif
/* Dyngen micro-op: T<REG> = sign-extended 32-bit immediate PARAM(1). */
void OPPROTO glue(op_set_s32_T, REG) (void)
{
glue(T, REG) = (int32_t)PARAM(1);
RETURN();
}
/* Dyngen micro-op: T<REG> = zero-extended 32-bit immediate PARAM(1). */
void OPPROTO glue(op_set_u32_T, REG) (void)
{
glue(T, REG) = (uint32_t)PARAM(1);
RETURN();
}
/* Dyngen micro-op: load a full 64-bit immediate into T<REG>.
   Dyngen cannot pass a 64-bit PARAM, so the live variant takes the
   value split into high (PARAM(1)) and low (PARAM(2)) 32-bit halves. */
#if 0 // Qemu does not know how to do this...
void OPPROTO glue(op_set_64_T, REG) (void)
{
glue(T, REG) = (int64_t)PARAM(1);
RETURN();
}
#else
void OPPROTO glue(op_set_64_T, REG) (void)
{
glue(T, REG) = ((int64_t)PARAM(1) << 32) | (int64_t)PARAM(2);
RETURN();
}
#endif
#endif /* REG < 3 */
/* Fixed-point register moves */
#if REG < 31
/* Dyngen micro-op: T0 = integer register env->ir[REG]. */
void OPPROTO glue(op_load_T0_ir, REG) (void)
{
T0 = env->ir[REG];
RETURN();
}
/* Dyngen micro-op: T1 = integer register env->ir[REG]. */
void OPPROTO glue(op_load_T1_ir, REG) (void)
{
T1 = env->ir[REG];
RETURN();
}
/* Dyngen micro-op: T2 = integer register env->ir[REG]. */
void OPPROTO glue(op_load_T2_ir, REG) (void)
{
T2 = env->ir[REG];
RETURN();
}
/* Dyngen micro-op: integer register env->ir[REG] = T0. */
void OPPROTO glue(op_store_T0_ir, REG) (void)
{
env->ir[REG] = T0;
RETURN();
}
/* Dyngen micro-op: integer register env->ir[REG] = T1. */
void OPPROTO glue(op_store_T1_ir, REG) (void)
{
env->ir[REG] = T1;
RETURN();
}
/* Dyngen micro-op: integer register env->ir[REG] = T2. */
void OPPROTO glue(op_store_T2_ir, REG) (void)
{
env->ir[REG] = T2;
RETURN();
}
void OPPROTO glue(op_cmov_ir, REG) (void)
{
if (T0)
......
......@@ -44,10 +44,15 @@ struct DisasContext {
uint32_t amask;
};
/* global register indexes */
static TCGv cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_pc;
/* dyngen register indexes */
static TCGv cpu_T[3];
/* register names */
static char cpu_reg_names[5*31];
#include "gen-icount.h"
......@@ -63,6 +68,19 @@ static void alpha_translate_init(void)
cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
#if TARGET_LONG_BITS > HOST_LONG_BITS
cpu_T[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
offsetof(CPUState, t0), "T0");
cpu_T[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
offsetof(CPUState, t1), "T1");
cpu_T[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
offsetof(CPUState, t2), "T2");
#else
cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG1, "T0");
cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG2, "T1");
cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG3, "T2");
#endif
p = cpu_reg_names;
for (i = 0; i < 31; i++) {
sprintf(p, "ir%d", i);
......@@ -107,65 +125,9 @@ static always_inline void func (int n) \
/* IR moves */
/* Special hacks for ir31 */
#define gen_op_load_T0_ir31 gen_op_reset_T0
#define gen_op_load_T1_ir31 gen_op_reset_T1
#define gen_op_load_T2_ir31 gen_op_reset_T2
#define gen_op_store_T0_ir31 gen_op_nop
#define gen_op_store_T1_ir31 gen_op_nop
#define gen_op_store_T2_ir31 gen_op_nop
#define gen_op_cmov_ir31 gen_op_nop
GEN32(gen_op_load_T0_ir, gen_op_load_T0_ir);
GEN32(gen_op_load_T1_ir, gen_op_load_T1_ir);
GEN32(gen_op_load_T2_ir, gen_op_load_T2_ir);
GEN32(gen_op_store_T0_ir, gen_op_store_T0_ir);
GEN32(gen_op_store_T1_ir, gen_op_store_T1_ir);
GEN32(gen_op_store_T2_ir, gen_op_store_T2_ir);
GEN32(gen_op_cmov_ir, gen_op_cmov_ir);
/* Emit code loading integer register irn into dyngen temporary Tn (0..2).
   Values of Tn outside 0..2 emit nothing, matching the original switch. */
static always_inline void gen_load_ir (DisasContext *ctx, int irn, int Tn)
{
    if (Tn == 0)
        gen_op_load_T0_ir(irn);
    else if (Tn == 1)
        gen_op_load_T1_ir(irn);
    else if (Tn == 2)
        gen_op_load_T2_ir(irn);
}
/* Emit code storing dyngen temporary Tn (0..2) into integer register irn.
   Values of Tn outside 0..2 emit nothing, matching the original switch. */
static always_inline void gen_store_ir (DisasContext *ctx, int irn, int Tn)
{
    if (Tn == 0)
        gen_op_store_T0_ir(irn);
    else if (Tn == 1)
        gen_op_store_T1_ir(irn);
    else if (Tn == 2)
        gen_op_store_T2_ir(irn);
}
/* Emit TCG code copying integer register reg into t.
   Register 31 reads as constant zero on Alpha. */
static inline void get_ir (TCGv t, int reg)
{
    if (reg != 31)
        tcg_gen_mov_i64(t, cpu_ir[reg]);
    else
        tcg_gen_movi_i64(t, 0);
}
/* Emit TCG code copying t into integer register reg.
   Writes to register 31 are discarded (Alpha hardwired zero). */
static inline void set_ir (TCGv t, int reg)
{
    if (reg == 31)
        return;
    tcg_gen_mov_i64(cpu_ir[reg], t);
}
/* FIR moves */
/* Special hacks for fir31 */
#define gen_op_load_FT0_fir31 gen_op_reset_FT0
......@@ -280,120 +242,6 @@ GEN_ST(s);
GEN_LD(t);
GEN_ST(t);
#if defined(__i386__) || defined(__x86_64__)
/* x86 hosts have no 16-bit constant micro-op; sign-extend to the
   32-bit one, which yields the same value in T0. */
static always_inline void gen_op_set_s16_T0 (int16_t imm)
{
gen_op_set_s32_T0((int32_t)imm);
}
/* x86 hosts have no 16-bit constant micro-op; sign-extend to the
   32-bit one, which yields the same value in T1. */
static always_inline void gen_op_set_s16_T1 (int16_t imm)
{
gen_op_set_s32_T1((int32_t)imm);
}
/* x86 fallback: a zero-extended 16-bit value fits in 0..0xFFFF, so
   routing it through the signed 32-bit op produces the same result. */
static always_inline void gen_op_set_u16_T0 (uint16_t imm)
{
gen_op_set_s32_T0((uint32_t)imm);
}
/* x86 fallback: a zero-extended 16-bit value fits in 0..0xFFFF, so
   routing it through the signed 32-bit op produces the same result. */
static always_inline void gen_op_set_u16_T1 (uint16_t imm)
{
gen_op_set_s32_T1((uint32_t)imm);
}
#endif
/* Load signed 64-bit immediate imm into T0, picking the narrowest
   constant micro-op whose sign extension reproduces imm exactly:
   reset for 0, then 16-bit, 32-bit, or the split-halves 64-bit op. */
static always_inline void gen_set_sT0 (DisasContext *ctx, int64_t imm)
{
int32_t imm32;
int16_t imm16;
imm32 = imm;
if (imm32 == imm) {
imm16 = imm;
if (imm16 == imm) {
if (imm == 0) {
gen_op_reset_T0();
} else {
gen_op_set_s16_T0(imm16);
}
} else {
gen_op_set_s32_T0(imm32);
}
} else {
#if 0 // Qemu does not know how to do this...
gen_op_set_64_T0(imm);
#else
gen_op_set_64_T0(imm >> 32, imm);
#endif
}
}
/* Load signed 64-bit immediate imm into T1, picking the narrowest
   constant micro-op whose sign extension reproduces imm exactly:
   reset for 0, then 16-bit, 32-bit, or the split-halves 64-bit op. */
static always_inline void gen_set_sT1 (DisasContext *ctx, int64_t imm)
{
int32_t imm32;
int16_t imm16;
imm32 = imm;
if (imm32 == imm) {
imm16 = imm;
if (imm16 == imm) {
if (imm == 0) {
gen_op_reset_T1();
} else {
gen_op_set_s16_T1(imm16);
}
} else {
gen_op_set_s32_T1(imm32);
}
} else {
#if 0 // Qemu does not know how to do this...
gen_op_set_64_T1(imm);
#else
gen_op_set_64_T1(imm >> 32, imm);
#endif
}
}
/* Load unsigned 64-bit immediate imm into T0, picking the narrowest
 * constant micro-op that can encode it (reset for 0, then u16, u32,
 * then the split-halves 64-bit op).
 *
 * Bug fix: the original tested `(!imm >> 16)`, i.e. (!imm) >> 16,
 * which is 0 or 1 shifted right by 16 and therefore always 0 -- the
 * zero/16-bit fast paths were dead code and every value <= 32 bits
 * went through the u32 op.  The intended test is !(imm >> 16). */
static always_inline void gen_set_uT0 (DisasContext *ctx, uint64_t imm)
{
    if (!(imm >> 32)) {
        if (!(imm >> 16)) {
            if (imm == 0)
                gen_op_reset_T0();
            else
                gen_op_set_u16_T0(imm);
        } else {
            gen_op_set_u32_T0(imm);
        }
    } else {
#if 0 // Qemu does not know how to do this...
        gen_op_set_64_T0(imm);
#else
        gen_op_set_64_T0(imm >> 32, imm);
#endif
    }
}
/* Load unsigned 64-bit immediate imm into T1, picking the narrowest
 * constant micro-op that can encode it (reset for 0, then u16, u32,
 * then the split-halves 64-bit op).
 *
 * Bug fix: the original tested `(!imm >> 16)`, i.e. (!imm) >> 16,
 * which is 0 or 1 shifted right by 16 and therefore always 0 -- the
 * zero/16-bit fast paths were dead code and every value <= 32 bits
 * went through the u32 op.  The intended test is !(imm >> 16). */
static always_inline void gen_set_uT1 (DisasContext *ctx, uint64_t imm)
{
    if (!(imm >> 32)) {
        if (!(imm >> 16)) {
            if (imm == 0)
                gen_op_reset_T1();
            else
                gen_op_set_u16_T1(imm);
        } else {
            gen_op_set_u32_T1(imm);
        }
    } else {
#if 0 // Qemu does not know how to do this...
        gen_op_set_64_T1(imm);
#else
        gen_op_set_64_T1(imm >> 32, imm);
#endif
    }
}
static always_inline void _gen_op_bcond (DisasContext *ctx)
{
#if 0 // Qemu does not know how to do this...
......@@ -424,15 +272,19 @@ static always_inline void gen_load_mem (DisasContext *ctx,
/* UNOP */
gen_op_nop();
} else {
gen_load_ir(ctx, rb, 0);
if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
if (disp16 != 0) {
gen_set_sT1(ctx, disp16);
tcg_gen_movi_i64(cpu_T[1], disp16);
gen_op_addq();
}
if (clear)
gen_op_n7();
(*gen_load_op)(ctx);
gen_store_ir(ctx, ra, 1);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]);
}
}
......@@ -441,14 +293,20 @@ static always_inline void gen_store_mem (DisasContext *ctx,
int ra, int rb, int32_t disp16,
int clear)
{
gen_load_ir(ctx, rb, 0);
if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
if (disp16 != 0) {
gen_set_sT1(ctx, disp16);
tcg_gen_movi_i64(cpu_T[1], disp16);
gen_op_addq();
}
if (clear)
gen_op_n7();
gen_load_ir(ctx, ra, 1);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[1], 0);
(*gen_store_op)(ctx);
}
......@@ -456,9 +314,12 @@ static always_inline void gen_load_fmem (DisasContext *ctx,
void (*gen_load_fop)(DisasContext *ctx),
int ra, int rb, int32_t disp16)
{
gen_load_ir(ctx, rb, 0);
if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
if (disp16 != 0) {
gen_set_sT1(ctx, disp16);
tcg_gen_movi_i64(cpu_T[1], disp16);
gen_op_addq();
}
(*gen_load_fop)(ctx);
......@@ -469,9 +330,12 @@ static always_inline void gen_store_fmem (DisasContext *ctx,
void (*gen_store_fop)(DisasContext *ctx),
int ra, int rb, int32_t disp16)
{
gen_load_ir(ctx, rb, 0);
if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
if (disp16 != 0) {
gen_set_sT1(ctx, disp16);
tcg_gen_movi_i64(cpu_T[1], disp16);
gen_op_addq();
}
gen_load_fir(ctx, ra, 1);
......@@ -483,13 +347,16 @@ static always_inline void gen_bcond (DisasContext *ctx,
int ra, int32_t disp16)
{
if (disp16 != 0) {
gen_set_uT0(ctx, ctx->pc);
gen_set_sT1(ctx, disp16 << 2);
tcg_gen_movi_i64(cpu_T[0], ctx->pc);
tcg_gen_movi_i64(cpu_T[1], disp16 << 2);
gen_op_addq1();
} else {
gen_set_uT1(ctx, ctx->pc);
tcg_gen_movi_i64(cpu_T[1], ctx->pc);
}
gen_load_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
(*gen_test_op)();
_gen_op_bcond(ctx);
}
......@@ -499,11 +366,11 @@ static always_inline void gen_fbcond (DisasContext *ctx,
int ra, int32_t disp16)
{
if (disp16 != 0) {
gen_set_uT0(ctx, ctx->pc);
gen_set_sT1(ctx, disp16 << 2);
tcg_gen_movi_i64(cpu_T[0], ctx->pc);
tcg_gen_movi_i64(cpu_T[1], disp16 << 2);
gen_op_addq1();
} else {
gen_set_uT1(ctx, ctx->pc);
tcg_gen_movi_i64(cpu_T[1], ctx->pc);
}
gen_load_fir(ctx, ra, 0);
(*gen_test_op)();
......@@ -515,11 +382,14 @@ static always_inline void gen_arith2 (DisasContext *ctx,
int rb, int rc, int islit, int8_t lit)
{
if (islit)
gen_set_sT0(ctx, lit);
tcg_gen_movi_i64(cpu_T[0], lit);
else if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
gen_load_ir(ctx, rb, 0);
tcg_gen_movi_i64(cpu_T[0], 0);
(*gen_arith_op)();
gen_store_ir(ctx, rc, 0);
if (rc != 31)
tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]);
}
static always_inline void gen_arith3 (DisasContext *ctx,
......@@ -527,13 +397,19 @@ static always_inline void gen_arith3 (DisasContext *ctx,
int ra, int rb, int rc,
int islit, int8_t lit)
{
gen_load_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
if (islit)
gen_set_sT1(ctx, lit);
tcg_gen_movi_i64(cpu_T[1], lit);
else if (rb != 31)
tcg_gen_mov_i64(cpu_T[1], cpu_ir[rb]);
else
gen_load_ir(ctx, rb, 1);
tcg_gen_movi_i64(cpu_T[1], 0);
(*gen_arith_op)();
gen_store_ir(ctx, rc, 0);
if (rc != 31)
tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]);
}
static always_inline void gen_cmov (DisasContext *ctx,
......@@ -541,11 +417,16 @@ static always_inline void gen_cmov (DisasContext *ctx,
int ra, int rb, int rc,
int islit, int8_t lit)
{
gen_load_ir(ctx, ra, 1);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[1], 0);
if (islit)
gen_set_sT0(ctx, lit);
tcg_gen_movi_i64(cpu_T[0], lit);
else if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
gen_load_ir(ctx, rb, 0);
tcg_gen_movi_i64(cpu_T[0], 0);
(*gen_test_op)();
gen_op_cmov_ir(rc);
}
......@@ -585,14 +466,18 @@ static always_inline void gen_fti (DisasContext *ctx,
{
gen_load_fir(ctx, rc, 0);
(*gen_move_fop)();
gen_store_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
}
static always_inline void gen_itf (DisasContext *ctx,
void (*gen_move_fop)(void),
int ra, int rc)
{
gen_load_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
(*gen_move_fop)();
gen_store_fir(ctx, rc, 0);
}
......@@ -727,22 +612,20 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0x08:
/* LDA */
{
TCGv v = tcg_const_i64(disp16);
if (ra != 31) {
if (rb != 31)
tcg_gen_add_i64(v, cpu_ir[rb], v);
set_ir(v, ra);
tcg_temp_free(v);
tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
else
tcg_gen_movi_i64(cpu_ir[ra], disp16);
}
break;
case 0x09:
/* LDAH */
{
TCGv v = tcg_const_i64(disp16 << 16);
if (ra != 31) {
if (rb != 31)
tcg_gen_add_i64(v, cpu_ir[rb], v);
set_ir(v, ra);
tcg_temp_free(v);
tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
else
tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
}
break;
case 0x0A:
......@@ -897,8 +780,12 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
gen_op_nop();
} else {
/* MOV */
gen_load_ir(ctx, rb, 0);
gen_store_ir(ctx, rc, 0);
if (rc != 31) {
if (rb != 31)
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
}
} else {
gen_arith3(ctx, &gen_op_bis, ra, rb, rc, islit, lit);
......@@ -947,7 +834,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
case 0x6C:
/* IMPLVER */
gen_op_load_implver();
gen_store_ir(ctx, rc, 0);
if (rc != 31)
tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]);
break;
default:
goto invalid_opc;
......@@ -1413,12 +1301,14 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
case 0xC000:
/* RPCC */
gen_op_load_pcc();
gen_store_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
break;
case 0xE000:
/* RC */
gen_op_load_irf();
gen_store_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
gen_op_clear_irf();
break;
case 0xE800:
......@@ -1433,7 +1323,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
case 0xF000:
/* RS */
gen_op_load_irf();
gen_store_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
gen_op_set_irf();
break;
case 0xF800:
......@@ -1452,16 +1343,17 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
if (!ctx->pal_mode)
goto invalid_opc;
gen_op_mfpr(insn & 0xFF);
gen_store_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]);
break;
#endif
case 0x1A:
gen_load_ir(ctx, rb, 0);
if (ra != 31) {
gen_set_uT1(ctx, ctx->pc);
gen_store_ir(ctx, ra, 1);
}
gen_op_branch();
if (ra != 31)
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
if (rb != 31)
tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
else
tcg_gen_movi_i64(cpu_pc, 0);
/* Those four jumps only differ by the branch prediction hint */
switch (fn2) {
case 0x0:
......@@ -1486,8 +1378,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
if (!ctx->pal_mode)
goto invalid_opc;
gen_load_ir(ctx, rb, 0);
gen_set_sT1(ctx, disp12);
if (rb != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
tcg_gen_movi_i64(cpu_T[1], disp12);
gen_op_addq();
switch ((insn >> 12) & 0xF) {
case 0x0:
......@@ -1569,7 +1464,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
gen_op_restore_mode();
break;
}
gen_store_ir(ctx, ra, 1);
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]);
break;
#endif
case 0x1C:
......@@ -1718,7 +1614,10 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
if (!ctx->pal_mode)
goto invalid_opc;
gen_load_ir(ctx, ra, 0);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
gen_op_mtpr(insn & 0xFF);
ret = 2;
break;
......@@ -1734,8 +1633,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
/* "Old" alpha */
gen_op_hw_rei();
} else {
gen_load_ir(ctx, rb, 0);
gen_set_uT1(ctx, (((int64_t)insn << 51) >> 51));
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51));
gen_op_addq();
gen_op_hw_ret();
}
......@@ -1749,10 +1651,16 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
#else
if (!ctx->pal_mode)
goto invalid_opc;
gen_load_ir(ctx, rb, 0);
gen_set_sT1(ctx, disp12);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
else
tcg_gen_movi_i64(cpu_T[0], 0);
tcg_gen_movi_i64(cpu_T[1], disp12);
gen_op_addq();
gen_load_ir(ctx, ra, 1);
if (ra != 31)
tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
else
tcg_gen_movi_i64(cpu_T[1], 0);
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
......@@ -1904,12 +1812,9 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
break;
case 0x30:
/* BR */
if (ra != 31) {
TCGv t = tcg_const_i64(ctx->pc);
set_ir(t, ra);
tcg_temp_free(t);
}
tcg_gen_movi_i64(cpu_pc, ctx->pc + (disp21 << 2));
if (ra != 31)
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
ret = 1;
break;
case 0x31:
......@@ -1929,13 +1834,9 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
break;
case 0x34:
/* BSR */
gen_set_uT0(ctx, ctx->pc);
gen_store_ir(ctx, ra, 0);
if (disp21 != 0) {
gen_set_sT1(ctx, disp21 << 2);
gen_op_addq();
}
gen_op_branch();
if (ra != 31)
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
ret = 1;
break;
case 0x35:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册