Commit 8f8e3aa4 authored by pbrook

ARM TCG conversion 13/16.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4150 c046a42c-6fe2-441c-8c8c-71466251a162
Parent 8984bd2e
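This step of the series moves ARM helpers off the old T0/T1/T2 global-register convention and onto typed TCG helper calls: each helper gains a DEF_HELPER_* declaration in helpers.h that produces both a C prototype and a gen_helper_* emitter used at translation time. A minimal sketch of the pattern, by analogy with the DEF_HELPER_1_3/DEF_HELPER_1_4 macros visible in this diff (the exact DEF_HELPER expansion lives in helpers.h):

```c
/* Sketch: what DEF_HELPER_1_2(test_exclusive, uint32_t, (CPUState *, uint32_t))
   is expected to produce under GEN_HELPER, by analogy with the 1_3/1_4
   macros in this diff.  */
uint32_t helper_test_exclusive(CPUState *env, uint32_t addr);   /* runtime side */

static inline void gen_helper_test_exclusive(TCGv ret, TCGv arg1, TCGv arg2)
{
    /* Emits a call at translation time instead of routing operands
       through the fixed globals T0/T1/T2.  */
    tcg_gen_helper_1_2(helper_test_exclusive, ret, arg1, arg2);
}
```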
@@ -23,7 +23,6 @@
 register struct CPUARMState *env asm(AREG0);
 register uint32_t T0 asm(AREG1);
 register uint32_t T1 asm(AREG2);
-register uint32_t T2 asm(AREG3);
 #define M0 env->iwmmxt.val
@@ -59,15 +58,8 @@ static inline int cpu_halted(CPUState *env) {
 #include "softmmu_exec.h"
 #endif
 /* In op_helper.c */
-void helper_mark_exclusive(CPUARMState *, uint32_t addr);
-int helper_test_exclusive(CPUARMState *, uint32_t addr);
-void helper_clrex(CPUARMState *env);
 void cpu_loop_exit(void);
 void raise_exception(int);
-void helper_neon_tbl(int rn, int maxindex);
 uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2);
@@ -432,7 +432,7 @@ static void flush_mmon(uint32_t addr)
 }
 
 /* Mark an address for exclusive access.  */
-void helper_mark_exclusive(CPUState *env, uint32_t addr)
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
 {
     if (!env->mmon_entry)
         allocate_mmon_state(env);
@@ -443,7 +443,7 @@ void helper_mark_exclusive(CPUState *env, uint32_t addr)
 
 /* Test if an exclusive address is still exclusive.  Returns zero
    if the address is still exclusive.  */
-int helper_test_exclusive(CPUState *env, uint32_t addr)
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
 {
     int res;
@@ -457,7 +457,7 @@ int helper_test_exclusive(CPUState *env, uint32_t addr)
     return res;
 }
 
-void helper_clrex(CPUState *env)
+void HELPER(clrex)(CPUState *env)
 {
     if (!(env->mmon_entry && env->mmon_entry->addr))
         return;
@@ -1176,17 +1176,17 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 /* Not really implemented.  Need to figure out a sane way of doing this.
    Maybe add generic watchpoint support and use that.  */
-void helper_mark_exclusive(CPUState *env, uint32_t addr)
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
 {
     env->mmon_addr = addr;
 }
 
-int helper_test_exclusive(CPUState *env, uint32_t addr)
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
 {
     return (env->mmon_addr != addr);
 }
 
-void helper_clrex(CPUState *env)
+void HELPER(clrex)(CPUState *env)
 {
     env->mmon_addr = -1;
 }
@@ -2496,6 +2496,8 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
     return float32_sub(three, float32_mul(a, b, s), s);
 }
 
+/* NEON helpers.  */
+
 /* TODO: The architecture specifies the value that the estimate functions
    should return.  We return the exact reciprocal/root instead.  */
 float32 HELPER(recpe_f32)(float32 a, CPUState *env)
......
@@ -51,6 +51,13 @@ static inline void gen_helper_##name(TCGv ret, \
 { \
     tcg_gen_helper_1_3(helper_##name, ret, arg1, arg2, arg3); \
 }
+#define DEF_HELPER_1_4(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, \
+    TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4) \
+{ \
+    tcg_gen_helper_1_4(helper_##name, ret, arg1, arg2, arg3, arg4); \
+}
 #else /* !GEN_HELPER */
 #define DEF_HELPER_0_0 DEF_HELPER
 #define DEF_HELPER_0_1 DEF_HELPER
@@ -60,6 +67,7 @@ static inline void gen_helper_##name(TCGv ret, \
 #define DEF_HELPER_1_1 DEF_HELPER
 #define DEF_HELPER_1_2 DEF_HELPER
 #define DEF_HELPER_1_3 DEF_HELPER
+#define DEF_HELPER_1_4 DEF_HELPER
 #define HELPER(x) glue(helper_,x)
 #endif
@@ -130,6 +138,10 @@ DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_0_2(mark_exclusive, void, (CPUState *, uint32_t))
+DEF_HELPER_1_2(test_exclusive, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_0_1(clrex, void, (CPUState *))
 DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
 DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))
@@ -195,6 +207,7 @@ DEF_HELPER_1_2(recpe_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
+DEF_HELPER_1_4(neon_tbl, uint32_t, (uint32_t, uint32_t, uint32_t, uint32_t))
 DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
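Per the GEN_HELPER macro added at the top of this file, the new DEF_HELPER_1_4 line for neon_tbl should expand to roughly the following (the prototype line produced by DEF_HELPER itself is assumed):

```c
/* Expansion sketch of DEF_HELPER_1_4(neon_tbl, ...) under GEN_HELPER.  */
uint32_t helper_neon_tbl(uint32_t, uint32_t, uint32_t, uint32_t);

static inline void gen_helper_neon_tbl(TCGv ret,
    TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4)
{
    tcg_gen_helper_1_4(helper_neon_tbl, ret, arg1, arg2, arg3, arg4);
}
```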
......
@@ -40,27 +40,26 @@ void cpu_unlock(void)
     spin_unlock(&global_cpu_lock);
 }
 
-void helper_neon_tbl(int rn, int maxindex)
+uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
+                          uint32_t rn, uint32_t maxindex)
 {
     uint32_t val;
-    uint32_t mask;
     uint32_t tmp;
     int index;
     int shift;
     uint64_t *table;
     table = (uint64_t *)&env->vfp.regs[rn];
     val = 0;
-    mask = 0;
     for (shift = 0; shift < 32; shift += 8) {
-        index = (T1 >> shift) & 0xff;
-        if (index <= maxindex) {
+        index = (ireg >> shift) & 0xff;
+        if (index < maxindex) {
             tmp = (table[index >> 3] >> (index & 7)) & 0xff;
             val |= tmp << shift;
         } else {
-            val |= T0 & (0xff << shift);
+            val |= def & (0xff << shift);
         }
     }
-    T0 = val;
+    return val;
 }
 
 #if !defined(CONFIG_USER_ONLY)
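The matching call site is in the collapsed translate.c diff; a hypothetical invocation could look like this, where tmp carries the four index bytes, tmp2 the default bytes, and rn/n are translation-time constants:

```c
/* Hypothetical call site -- names are illustrative, not from the commit.  */
TCGv tmp3 = tcg_const_i32(rn);
TCGv tmp4 = tcg_const_i32(n);
gen_helper_neon_tbl(tmp, tmp, tmp2, tmp3, tmp4);
```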
......
 /* ARM memory operations.  */
-/* Load-locked, store exclusive.  */
-#define EXCLUSIVE_OP(suffix, ldsuffix) \
-void OPPROTO glue(op_ld##suffix##ex,MEMSUFFIX)(void) \
-{ \
-    cpu_lock(); \
-    helper_mark_exclusive(env, T1); \
-    T0 = glue(ld##ldsuffix,MEMSUFFIX)(T1); \
-    cpu_unlock(); \
-    FORCE_RET(); \
-} \
-\
-void OPPROTO glue(op_st##suffix##ex,MEMSUFFIX)(void) \
-{ \
-    int failed; \
-    cpu_lock(); \
-    failed = helper_test_exclusive(env, T1); \
-    /* ??? Is it safe to hold the cpu lock over a store?  */ \
-    if (!failed) { \
-        glue(st##suffix,MEMSUFFIX)(T1, T0); \
-    } \
-    T0 = failed; \
-    cpu_unlock(); \
-    FORCE_RET(); \
-}
-
-EXCLUSIVE_OP(b, ub)
-EXCLUSIVE_OP(w, uw)
-EXCLUSIVE_OP(l, l)
-
-#undef EXCLUSIVE_OP
-
-/* Load exclusive T0:T1 from address T1.  */
-void OPPROTO glue(op_ldqex,MEMSUFFIX)(void)
-{
-    cpu_lock();
-    helper_mark_exclusive(env, T1);
-    T0 = glue(ldl,MEMSUFFIX)(T1);
-    T1 = glue(ldl,MEMSUFFIX)((T1 + 4));
-    cpu_unlock();
-    FORCE_RET();
-}
-
-/* Store exclusive T0:T2 to address T1.  */
-void OPPROTO glue(op_stqex,MEMSUFFIX)(void)
-{
-    int failed;
-    cpu_lock();
-    failed = helper_test_exclusive(env, T1);
-    /* ??? Is it safe to hold the cpu lock over a store?  */
-    if (!failed) {
-        glue(stl,MEMSUFFIX)(T1, T0);
-        glue(stl,MEMSUFFIX)((T1 + 4), T2);
-    }
-    T0 = failed;
-    cpu_unlock();
-    FORCE_RET();
-}
-
 /* iwMMXt load/store.  Address is in T1 */
 #define MMX_MEM_OP(name, ldname) \
 void OPPROTO glue(op_iwmmxt_ld##name,MEMSUFFIX)(void) \
......
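The removed load-locked/store-exclusive ops are superseded by TCG-generated code that calls the mark_exclusive/test_exclusive helpers declared in helpers.h. A hedged sketch of the new shape; addr, s, tmp, new_tmp, gen_ld32 and IS_USER are assumed facilities from target-arm/translate.c, and the committed version is in the collapsed translate.c diff:

```c
/* Sketch only -- not the committed code.  */

/* ldrex: record the monitored address, then perform the load.  */
gen_helper_mark_exclusive(cpu_env, addr);
tmp = gen_ld32(addr, IS_USER(s));

/* strex: probe the monitor; zero in 'tmp' means the address is still
   exclusive, and a conditional branch on that flag then decides whether
   the store is performed.  */
tmp = new_tmp();
gen_helper_test_exclusive(tmp, cpu_env, addr);
```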
@@ -47,11 +47,6 @@ NEON_OP(getreg_T1)
 {
     T1 = *(uint32_t *)((char *) env + PARAM1);
 }
-NEON_OP(getreg_T2)
-{
-    T2 = *(uint32_t *)((char *) env + PARAM1);
-}
-
 NEON_OP(setreg_T0)
 {
     *(uint32_t *)((char *) env + PARAM1) = T0;
@@ -62,11 +57,6 @@ NEON_OP(setreg_T1)
     *(uint32_t *)((char *) env + PARAM1) = T1;
 }
-NEON_OP(setreg_T2)
-{
-    *(uint32_t *)((char *) env + PARAM1) = T2;
-}
-
 #define NEON_TYPE1(name, type) \
 typedef struct \
 { \
@@ -293,28 +283,6 @@ NEON_OP(hsub_u32)
     FORCE_RET();
 }
-/* ??? bsl, bif and bit are all the same op, just with the operands in a
-   different order.  It's currently easier to have 3 different ops than
-   rearrange the operands.  */
-
-/* Bitwise Select.  */
-NEON_OP(bsl)
-{
-    T0 = (T0 & T2) | (T1 & ~T2);
-}
-
-/* Bitwise Insert If True.  */
-NEON_OP(bit)
-{
-    T0 = (T0 & T1) | (T2 & ~T1);
-}
-
-/* Bitwise Insert If False.  */
-NEON_OP(bif)
-{
-    T0 = (T2 & T1) | (T0 & ~T1);
-}
-
 #define NEON_USAT(dest, src1, src2, type) do { \
     uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
     if (tmp != (type)tmp) { \
@@ -423,7 +391,7 @@ NEON_VOP(shl_u32, neon_u32, 1)
 NEON_OP(shl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -437,7 +405,7 @@ NEON_OP(shl_u64)
 NEON_OP(shl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -468,7 +436,7 @@ NEON_VOP(rshl_u32, neon_u32, 1)
 NEON_OP(rshl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
@@ -483,7 +451,7 @@ NEON_OP(rshl_u64)
 NEON_OP(rshl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
@@ -514,7 +482,7 @@ NEON_VOP(qshl_s32, neon_s32, 1)
 NEON_OP(qshl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -550,7 +518,7 @@ NEON_VOP(qshl_u32, neon_u32, 1)
 NEON_OP(qshl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -1713,12 +1681,6 @@ NEON_OP(zip_u16)
     FORCE_RET();
 }
-/* Table lookup.  This accesses the register file directly.  */
-NEON_OP(tbl)
-{
-    helper_neon_tbl(PARAM1, PARAM2);
-}
-
 NEON_OP(dup_u8)
 {
     T0 = (T0 >> PARAM1) & 0xff;
@@ -1726,20 +1688,3 @@ NEON_OP(dup_u8)
     T0 |= T0 << 16;
     FORCE_RET();
 }
-
-/* Helpers for element load/store.  */
-NEON_OP(insert_elt)
-{
-    int shift = PARAM1;
-    uint32_t mask = PARAM2;
-    T2 = (T2 & mask) | (T0 << shift);
-    FORCE_RET();
-}
-
-NEON_OP(extract_elt)
-{
-    int shift = PARAM1;
-    uint32_t mask = PARAM2;
-    T0 = (T2 & mask) >> shift;
-    FORCE_RET();
-}
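With the T2 global gone, bsl/bit/bif no longer need dedicated ops and can be expanded inline with generic TCG bit operations. One plausible expansion of the former bsl, T0 = (T0 & T2) | (T1 & ~T2), purely as an illustration (the committed code is in the collapsed translate.c diff):

```c
/* Illustrative only: t0, t1, t2 are TCGv temporaries; new_tmp()/dead_tmp()
   are assumed to be the translate.c temporary allocator.  */
TCGv tmp = new_tmp();
tcg_gen_and_i32(tmp, t0, t2);   /* tmp = t0 & t2 */
tcg_gen_not_i32(t2, t2);        /* t2 = ~t2 */
tcg_gen_and_i32(t2, t1, t2);    /* t2 = t1 & ~t2 */
tcg_gen_or_i32(t0, tmp, t2);    /* t0 = (t0 & t2) | (t1 & ~t2) */
dead_tmp(tmp);
```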
This diff is collapsed.