Commit e141ab52 authored by Blue Swirl

softmmu templates: optionally pass CPUState to memory access functions

Optionally, make memory access helpers take a parameter for CPUState
instead of relying on global env.

On most targets, perform simple moves to reorder registers. On i386,
switch from regparm(3) calling convention to standard stack-based
version.
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Parent 6a18ae2d
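To make the option concrete before diving into the diff: a hedged sketch of a call site under both configurations (the wrapper function is hypothetical; the accessor names cpu_ldub_code/ldub_code and the CPUArchState type are the ones used in the hunks below):

```c
/* Hypothetical illustration only: fetch one byte of guest code with the
 * two accessor styles toggled by CONFIG_TCG_PASS_AREG0. */
static uint8_t fetch_code_byte(CPUArchState *env, target_ulong pc)
{
#ifdef CONFIG_TCG_PASS_AREG0
    /* New style: the CPU state is an explicit first parameter. */
    return cpu_ldub_code(env, pc);
#else
    /* Legacy style: the accessor implicitly uses the global env (AREG0). */
    return ldub_code(pc);
#endif
}
```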
@@ -259,12 +259,21 @@ extern unsigned long reserved_va;
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
#ifndef CONFIG_TCG_PASS_AREG0
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)
#else
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)
#endif
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
...
@@ -312,7 +312,9 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#ifndef CONFIG_TCG_PASS_AREG0
#define env cpu_single_env
#endif
#define DATA_SIZE 1
#include "softmmu_header.h"
...
@@ -4595,7 +4595,11 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
...
@@ -9,6 +9,7 @@
#ifndef SOFTMMU_DEFS_H
#define SOFTMMU_DEFS_H
#ifndef CONFIG_TCG_PASS_AREG0
uint8_t __ldb_mmu(target_ulong addr, int mmu_idx);
void __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);
uint16_t __ldw_mmu(target_ulong addr, int mmu_idx);
@@ -26,5 +27,32 @@ uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);
void __stl_cmmu(target_ulong addr, uint32_t val, int mmu_idx);
uint64_t __ldq_cmmu(target_ulong addr, int mmu_idx);
void __stq_cmmu(target_ulong addr, uint64_t val, int mmu_idx);
#else
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx);
#endif
#endif
@@ -78,9 +78,23 @@
#define ADDR_READ addr_read
#endif
#ifndef CONFIG_TCG_PASS_AREG0
#define ENV_PARAM
#define ENV_VAR
#define CPU_PREFIX
#define HELPER_PREFIX __
#else
#define ENV_PARAM CPUArchState *env,
#define ENV_VAR env,
#define CPU_PREFIX cpu_
#define HELPER_PREFIX helper_
#endif
/* generic load/store macros */

static inline RES_TYPE
glue(glue(glue(CPU_PREFIX, ld), USUFFIX), MEMSUFFIX)(ENV_PARAM
                                                     target_ulong ptr)
{
    int page_index;
    RES_TYPE res;
@@ -93,7 +107,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_VAR
                                                                     addr,
                                                                     mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
@@ -102,7 +118,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
}

#if DATA_SIZE <= 2
static inline int
glue(glue(glue(CPU_PREFIX, lds), SUFFIX), MEMSUFFIX)(ENV_PARAM
                                                     target_ulong ptr)
{
    int res, page_index;
    target_ulong addr;
@@ -114,7 +132,8 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = (DATA_STYPE)glue(glue(glue(HELPER_PREFIX, ld), SUFFIX),
                               MMUSUFFIX)(ENV_VAR addr, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
@@ -127,7 +146,9 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)

/* generic store macro */

static inline void
glue(glue(glue(CPU_PREFIX, st), SUFFIX), MEMSUFFIX)(ENV_PARAM target_ulong ptr,
                                                    RES_TYPE v)
{
    int page_index;
    target_ulong addr;
@@ -139,7 +160,8 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_VAR addr, v,
                                                               mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
@@ -151,46 +173,52 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE

#if ACCESS_TYPE != (NB_MMU_MODES + 1)

#if DATA_SIZE == 8
static inline float64 glue(glue(CPU_PREFIX, ldfq), MEMSUFFIX)(ENV_PARAM
                                                              target_ulong ptr)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.i = glue(glue(CPU_PREFIX, ldq), MEMSUFFIX)(ENV_VAR ptr);
    return u.d;
}

static inline void glue(glue(CPU_PREFIX, stfq), MEMSUFFIX)(ENV_PARAM
                                                           target_ulong ptr,
                                                           float64 v)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.d = v;
    glue(glue(CPU_PREFIX, stq), MEMSUFFIX)(ENV_VAR ptr, u.i);
}
#endif /* DATA_SIZE == 8 */

#if DATA_SIZE == 4
static inline float32 glue(glue(CPU_PREFIX, ldfl), MEMSUFFIX)(ENV_PARAM
                                                              target_ulong ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = glue(glue(CPU_PREFIX, ldl), MEMSUFFIX)(ENV_VAR ptr);
    return u.f;
}

static inline void glue(glue(CPU_PREFIX, stfl), MEMSUFFIX)(ENV_PARAM
                                                           target_ulong ptr,
                                                           float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    glue(glue(CPU_PREFIX, stl), MEMSUFFIX)(ENV_VAR ptr, u.i);
}
#endif /* DATA_SIZE == 4 */

@@ -205,3 +233,7 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ
#undef ENV_PARAM
#undef ENV_VAR
#undef CPU_PREFIX
#undef HELPER_PREFIX
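As an aside for readers unfamiliar with the glue() chains above: the two-level token paste can be exercised on its own. A minimal, self-contained demo, assuming nothing from QEMU beyond the macro trick itself (the stand-in function body and main() are invented for illustration):

```c
#include <stdio.h>

/* Two-level paste, as in QEMU: the indirection forces argument expansion. */
#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

#define CPU_PREFIX cpu_
#define USUFFIX    ub
#define MEMSUFFIX  _code

/* Expands to: static int cpu_ldub_code(int ptr) */
static int glue(glue(glue(CPU_PREFIX, ld), USUFFIX), MEMSUFFIX)(int ptr)
{
    return ptr & 0xff;              /* stand-in body for the demo */
}

int main(void)
{
    printf("%d\n", cpu_ldub_code(0x1234));   /* prints 52 */
    return 0;
}
```

With CONFIG_TCG_PASS_AREG0 defined, the same chains in the real template produce cpu_ldub_code(env, ptr) calling helper_ldb_cmmu(env, addr, mmu_idx) on the slow path; without it they reproduce the historical ldub_code(ptr) calling __ldb_cmmu(addr, mmu_idx).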
@@ -54,10 +54,24 @@
#define ADDR_READ addr_read
#endif

#ifndef CONFIG_TCG_PASS_AREG0
#define ENV_PARAM
#define ENV_VAR
#define CPU_PREFIX
#define HELPER_PREFIX __
#else
#define ENV_PARAM CPUArchState *env,
#define ENV_VAR env,
#define CPU_PREFIX cpu_
#define HELPER_PREFIX helper_
#endif
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
                                              target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
@@ -89,7 +103,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
}

/* handle all cases except unaligned access which span two pages */
DATA_TYPE
glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                       target_ulong addr,
                                                       int mmu_idx)
{
    DATA_TYPE res;
    int index;
@@ -110,22 +127,22 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
@@ -136,7 +153,7 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
@@ -145,9 +162,11 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
}

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                       target_ulong addr,
                                       int mmu_idx,
                                       void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
@@ -164,15 +183,15 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
@@ -196,12 +215,14 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(ENV_PARAM
                                          target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
@@ -231,8 +252,10 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
#endif /* SHIFT > 2 */
}

void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                            target_ulong addr,
                                                            DATA_TYPE val,
                                                            int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
@@ -250,21 +273,21 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
@@ -275,7 +298,7 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
@@ -283,7 +306,8 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
@@ -302,7 +326,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
@@ -310,10 +334,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
@@ -338,3 +364,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef ENV_PARAM
#undef ENV_VAR
#undef CPU_PREFIX
#undef HELPER_PREFIX
@@ -929,6 +929,27 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -936,6 +957,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -943,6 +966,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
#endif
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
@@ -1075,6 +1099,19 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal and incorrect for 64 bit */
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[2], 0,
tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[1], 0,
tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
SHIFT_IMM_LSL(0));
#endif
tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
switch (opc) {
@@ -1341,6 +1378,22 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
}
# endif
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal and incorrect for 64 bit */
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[3], 0,
tcg_target_call_iarg_regs[2], SHIFT_IMM_LSL(0));
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[2], 0,
tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[1], 0,
tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
SHIFT_IMM_LSL(0));
#endif
tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
if (opc == 3)
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
...
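The #ifdef CONFIG_TCG_PASS_AREG0 move sequences above, and their counterparts in the backends that follow, all implement the same argument rotation: each already-loaded helper argument is shifted up one register slot, highest slot first, and env is then placed in slot 0. Copying top-down is what prevents a source register from being clobbered before it is read. A plain-C rendering of the idea (illustrative only, not part of the patch):

```c
/* Illustrative only: slide call arguments up one slot and prepend env,
 * mirroring the tcg_out_mov/tcg_out_dat_reg sequences in the patch. */
static void prepend_env_arg(long arg_regs[], int nargs, long env)
{
    for (int i = nargs; i > 0; i--) {
        arg_regs[i] = arg_regs[i - 1];  /* highest slot first: no clobber */
    }
    arg_regs[0] = env;                  /* env becomes the first argument */
}
```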
@@ -882,6 +882,27 @@ static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -889,12 +910,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
#endif
/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
@@ -1061,6 +1085,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
}
tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call(s, qemu_ld_helpers[opc & 3]);
switch (opc) {
@@ -1212,6 +1245,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
tcg_abort();
}
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call(s, qemu_st_helpers[opc]);
/* label2: */
...
@@ -178,6 +178,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDX);
#endif
} else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
@@ -957,6 +960,27 @@ static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void *qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void *qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -964,12 +988,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
#endif
/* Perform the TLB load and compare.
@@ -1188,11 +1215,26 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
}
tcg_out_push(s, args[addrlo_idx]);
stack_adjust += 4;
#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_push(s, TCG_AREG0);
stack_adjust += 4;
#endif
#else
/* The first argument is already loaded with addrlo. */
arg_idx = 1;
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
#endif
tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
@@ -1386,11 +1428,26 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
}
tcg_out_push(s, args[addrlo_idx]);
stack_adjust += 4;
#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_push(s, TCG_AREG0);
stack_adjust += 4;
#endif
#else
tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
TCG_REG_RSI, data_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
stack_adjust = 0;
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
#endif
tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
...
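One backend-specific wrinkle worth spelling out: with a fourth argument slot, the i386 helpers can no longer use GCC's regparm(3) convention, which is why the commit message mentions the switch to the standard stack-based convention. In the 32-bit paths above, arguments are pushed right-to-left and TCG_AREG0 is pushed last, so env lands in the leftmost (first) parameter position, while stack_adjust accumulates the bytes to drop after the call. A hedged sketch of the resulting frame (hypothetical instruction sequence, not emitted verbatim by the patch):

```c
/* Hypothetical cdecl call frame for the 32-bit softmmu slow path:
 *
 *     push mem_index        ; rightmost argument, pushed first
 *     push addr             ; guest virtual address (one or two words)
 *     push env (TCG_AREG0)  ; pushed last => first parameter
 *     call helper_ldl_mmu   ; i.e. helper_ldl_mmu(env, addr, mem_index)
 *     add  esp, stack_adjust ; pop the pushed arguments
 */
```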
@@ -1452,12 +1452,25 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
}
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
__ldl_mmu,
__ldq_mmu,
};
#endif
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
@@ -1517,6 +1530,15 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
}
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
if (!bswap || s_bits == 0) {
tcg_out_bundle(s, miB,
tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
@@ -1547,12 +1569,25 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
}
}
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
#endif
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
@@ -1622,6 +1657,17 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
data_reg = TCG_REG_R2;
}
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_bundle(s, miB,
tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
data_reg, TCG_REG_R3),
...
@@ -750,6 +750,27 @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, int ret,
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -757,6 +778,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -764,6 +787,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
#endif
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
@@ -858,6 +882,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
# endif
tcg_out_movi(s, TCG_TYPE_I32, sp_args++, mem_index);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_ld_helpers[s_bits]);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
tcg_out_nop(s);
@@ -1069,6 +1102,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_st_helpers[s_bits]);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
tcg_out_nop(s);
...
@@ -508,6 +508,27 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -515,6 +536,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};

/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
   int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -522,6 +545,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
#endif
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
{
@@ -598,6 +622,16 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
#endif
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
switch (opc) {
case 0|4:
@@ -829,6 +863,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
ir++;
tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
label2_ptr = s->code_ptr;
tcg_out32 (s, B);
...
@@ -552,6 +552,27 @@ static void tcg_out_ldsta (TCGContext *s, int ret, int addr,
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -559,12 +580,15 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
__stl_mmu,
__stq_mmu,
};
#endif
static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
                              int addr_reg, int s_bits, int offset)
@@ -648,6 +672,15 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
tcg_out_mov (s, TCG_TYPE_I64, 3, addr_reg);
tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
switch (opc) {
@@ -796,6 +829,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
label2_ptr = s->code_ptr;
...
@@ -301,6 +301,27 @@ static const uint8_t tcg_cond_to_ltr_cond[10] = {
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -308,6 +329,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -315,6 +338,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
#endif
static uint8_t *tb_ret_addr;
@@ -1483,9 +1507,29 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
tcg_abort();
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
} else {
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
/* sign extension */
...
@@ -59,6 +59,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
};
#endif
#ifdef CONFIG_TCG_PASS_AREG0
#define ARG_OFFSET 1
#else
#define ARG_OFFSET 0
#endif
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_L0,
TCG_REG_L1,
@@ -86,9 +92,9 @@ static const int tcg_target_call_iarg_regs[6] = {
static const int tcg_target_call_oarg_regs[] = {
TCG_REG_O0,
TCG_REG_O1,
TCG_REG_O2,
TCG_REG_O3,
};
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
@@ -155,6 +161,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O3);
#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_S11;
@@ -706,6 +715,27 @@ static void tcg_target_qemu_prologue(TCGContext *s)
#include "../../softmmu_defs.h"
#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -713,6 +743,8 @@ static const void * const qemu_ld_helpers[4] = {
__ldq_mmu,
};
/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -720,6 +752,7 @@ static const void * const qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
#endif
#if TARGET_LONG_BITS == 32
#define TARGET_LD_OP LDUW
@@ -801,6 +834,17 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
#endif
/* XXX: move that code at the end of the TB */
/* qemu_ld_helper[s_bits](arg0, arg1) */
...
@@ -798,6 +798,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_qemu_st8:
case INDEX_op_qemu_st16:
case INDEX_op_qemu_st32:
#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_r(s, TCG_AREG0);
#endif
tcg_out_r(s, *args++);
tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
@@ -808,6 +811,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
#endif
break;
case INDEX_op_qemu_st64:
#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_r(s, TCG_AREG0);
#endif
tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
tcg_out_r(s, *args++);
...