Commit 6a73ecf5 authored by Richard Henderson

target-alpha: Introduce MMU_PHYS_IDX

Rather than using helpers for physical accesses, use an mmu index.
The primary cleanup is with store-conditional on physical addresses.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Parent 05188cc7
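In brief: instead of routing PALmode physical accesses through dedicated load/store helpers, the translator now emits ordinary guest memory operations tagged with the new MMU_PHYS_IDX, and the MMU translation path short-circuits that index to an identity mapping with full permissions. The standalone C sketch below only models that dispatch pattern; the names translate_address and XlatResult, and the stubbed page-table branch, are invented for illustration and are not QEMU APIs.

#include <stdint.h>
#include <stdio.h>

/* MMU indexes, mirroring the defines this commit adds to cpu.h. */
enum { MMU_KERNEL_IDX = 0, MMU_USER_IDX = 1, MMU_PHYS_IDX = 2 };

enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };

/* Toy translation result; not QEMU's real structures. */
typedef struct {
    uint64_t phys;
    int prot;
    int ok;
} XlatResult;

/* Sketch of the dispatch that get_physical_address() gains in this
 * commit: MMU_PHYS_IDX bypasses the page-table walk entirely and the
 * address is used as-is with full permissions. */
static XlatResult translate_address(uint64_t addr, int mmu_idx)
{
    XlatResult r = { 0, 0, 0 };

    if (mmu_idx == MMU_PHYS_IDX) {
        r.phys = addr;
        r.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        r.ok = 1;
        return r;
    }

    /* Kernel/user accesses would walk the page tables here; this stub
     * simply fails, standing in for the real lookup. */
    return r;
}

int main(void)
{
    XlatResult r = translate_address(0x2000, MMU_PHYS_IDX);
    printf("phys=%#llx prot=%d ok=%d\n",
           (unsigned long long)r.phys, r.prot, r.ok);
    return 0;
}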
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -201,7 +201,7 @@ enum {
/* MMU modes definitions */
-/* Alpha has 5 MMU modes: PALcode, kernel, executive, supervisor, and user.
+/* Alpha has 5 MMU modes: PALcode, Kernel, Executive, Supervisor, and User.
The Unix PALcode only exposes the kernel and user modes; presumably
executive and supervisor are used by VMS.
@@ -209,22 +209,18 @@ enum {
there are PALmode instructions that can access data via physical mode
or via an os-installed "alternate mode", which is one of the 4 above.
-QEMU does not currently properly distinguish between code/data when
-looking up addresses. To avoid having to address this issue, our
-emulated PALcode will cheat and use the KSEG mapping for its code+data
-rather than physical addresses.
+That said, we're only emulating Unix PALcode, and not attempting VMS,
+so we don't need to implement Executive and Supervisor. QEMU's own
+PALcode cheats and uses the KSEG mapping for its code+data rather than
+physical addresses. */
-Moreover, we're only emulating Unix PALcode, and not attempting VMS.
-All of which allows us to drop all but kernel and user modes.
-Elide the unused MMU modes to save space. */
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
+#define MMU_PHYS_IDX 2
typedef struct CPUAlphaState CPUAlphaState;
--- a/target-alpha/helper.c
+++ b/target-alpha/helper.c
@@ -126,6 +126,14 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot = 0;
int ret = MM_K_ACV;
+/* Handle physical accesses. */
+if (mmu_idx == MMU_PHYS_IDX) {
+phys = addr;
+prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ret = -1;
+goto exit;
+}
/* Ensure that the virtual address is properly sign-extended from
the last implemented virtual address bit. */
if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -92,15 +92,6 @@ DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64)
#if !defined (CONFIG_USER_ONLY)
-DEF_HELPER_2(ldl_phys, i64, env, i64)
-DEF_HELPER_2(ldq_phys, i64, env, i64)
-DEF_HELPER_2(ldl_l_phys, i64, env, i64)
-DEF_HELPER_2(ldq_l_phys, i64, env, i64)
-DEF_HELPER_3(stl_phys, void, env, i64, i64)
-DEF_HELPER_3(stq_phys, void, env, i64, i64)
-DEF_HELPER_3(stl_c_phys, i64, env, i64, i64)
-DEF_HELPER_3(stq_c_phys, i64, env, i64, i64)
DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env)
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -25,79 +25,6 @@
/* Softmmu support */
#ifndef CONFIG_USER_ONLY
-uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-return (int32_t)ldl_phys(cs->as, p);
-}
-uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-return ldq_phys(cs->as, p);
-}
-uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-env->lock_addr = p;
-return env->lock_value = (int32_t)ldl_phys(cs->as, p);
-}
-uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-env->lock_addr = p;
-return env->lock_value = ldq_phys(cs->as, p);
-}
-void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-stl_phys(cs->as, p, v);
-}
-void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-stq_phys(cs->as, p, v);
-}
-uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-uint64_t ret = 0;
-if (p == env->lock_addr) {
-int32_t old = ldl_phys(cs->as, p);
-if (old == (int32_t)env->lock_value) {
-stl_phys(cs->as, p, v);
-ret = 1;
-}
-}
-env->lock_addr = -1;
-return ret;
-}
-uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
-{
-CPUState *cs = CPU(alpha_env_get_cpu(env));
-uint64_t ret = 0;
-if (p == env->lock_addr) {
-uint64_t old = ldq_phys(cs->as, p);
-if (old == env->lock_value) {
-stq_phys(cs->as, p, v);
-ret = 1;
-}
-}
-env->lock_addr = -1;
-return ret;
-}
void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -392,7 +392,8 @@ static inline void gen_store_mem(DisasContext *ctx,
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
-int32_t disp16, int quad)
+int32_t disp16, int mem_idx,
+TCGMemOp op)
{
TCGv addr;
@@ -414,7 +415,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
/* ??? This is handled via a complicated version of compare-and-swap
in the cpu_loop. Hopefully one day we'll have a real CAS opcode
in TCG so that this isn't necessary. */
-return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
+return gen_excp(ctx, (op & MO_SIZE) == MO_64 ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
/* ??? In system mode we are never multi-threaded, so CAS can be
implemented via a non-atomic load-compare-store sequence. */
@@ -427,11 +428,10 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
val = tcg_temp_new();
-tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
+tcg_gen_qemu_ld_i64(val, addr, mem_idx, op);
tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
-tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
-quad ? MO_LEQ : MO_LEUL);
+tcg_gen_qemu_st_i64(ctx->ir[ra], addr, mem_idx, op);
tcg_gen_movi_i64(ctx->ir[ra], 1);
tcg_gen_br(lab_done);
@@ -2423,19 +2423,19 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
-gen_helper_ldl_phys(va, cpu_env, addr);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
-gen_helper_ldq_phys(va, cpu_env, addr);
+tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
-gen_helper_ldl_l_phys(va, cpu_env, addr);
+gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
-gen_helper_ldq_l_phys(va, cpu_env, addr);
+gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
break;
case 0x4:
/* Longword virtual PTE fetch (hw_ldl/v) */
@@ -2674,27 +2674,34 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
#ifndef CONFIG_USER_ONLY
REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
{
-TCGv addr = tcg_temp_new();
-va = load_gpr(ctx, ra);
-vb = load_gpr(ctx, rb);
-tcg_gen_addi_i64(addr, vb, disp12);
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
-gen_helper_stl_phys(cpu_env, addr, va);
+va = load_gpr(ctx, ra);
+vb = load_gpr(ctx, rb);
+tmp = tcg_temp_new();
+tcg_gen_addi_i64(tmp, vb, disp12);
+tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+tcg_temp_free(tmp);
break;
case 0x1:
/* Quadword physical access */
-gen_helper_stq_phys(cpu_env, addr, va);
+va = load_gpr(ctx, ra);
+vb = load_gpr(ctx, rb);
+tmp = tcg_temp_new();
+tcg_gen_addi_i64(tmp, vb, disp12);
+tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
+tcg_temp_free(tmp);
break;
case 0x2:
/* Longword physical access with lock */
-gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
+ret = gen_store_conditional(ctx, ra, rb, disp12,
+MMU_PHYS_IDX, MO_LESL);
break;
case 0x3:
/* Quadword physical access with lock */
-gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
+ret = gen_store_conditional(ctx, ra, rb, disp12,
+MMU_PHYS_IDX, MO_LEQ);
break;
case 0x4:
/* Longword virtual access */
@@ -2733,7 +2740,6 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
/* Invalid */
goto invalid_opc;
}
-tcg_temp_free(addr);
break;
}
#else
@@ -2797,11 +2803,13 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2E:
/* STL_C */
-ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
+ret = gen_store_conditional(ctx, ra, rb, disp16,
+ctx->mem_idx, MO_LESL);
break;
case 0x2F:
/* STQ_C */
-ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
+ret = gen_store_conditional(ctx, ra, rb, disp16,
+ctx->mem_idx, MO_LEQ);
break;
case 0x30:
/* BR */