Commit 052367ba authored by Peter Maydell

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140527' into staging

target-arm:
 * Preliminary restructuring for EL2/EL3 support
 * Improve CPACR handling
 * Fix pxa2xx_lcd palette formats
 * Update highbank/midway maintainer

# gpg: Signature made Tue 27 May 2014 17:26:27 BST using RSA key ID 14360CDE
# gpg: Can't check signature: public key not found

* remotes/pmaydell/tags/pull-target-arm-20140527: (26 commits)
  target-arm: A64: Register VBAR_EL3
  target-arm: A64: Register VBAR_EL2
  target-arm: Make vbar_write writeback to any CPREG
  target-arm: A64: Generalize update_spsel for the various ELs
  target-arm: A64: Generalize ERET to various ELs
  target-arm: A64: Trap ERET from EL0 at translation time
  target-arm: A64: Forbid ERET to higher or unimplemented ELs
  target-arm: Register EL3 versions of ELR and SPSR
  target-arm: Register EL2 versions of ELR and SPSR
  target-arm: Add a feature flag for EL3
  target-arm: Add a feature flag for EL2
  target-arm: A64: Introduce aarch64_banked_spsr_index()
  target-arm: Add SPSR entries for EL2/HYP and EL3/MON
  target-arm: A64: Add ELR entries for EL2 and 3
  target-arm: A64: Add SP entries for EL2 and 3
  target-arm: c12_vbar -> vbar_el[]
  target-arm: Make esr_el1 an array
  target-arm: Make elr_el1 an array
  target-arm: Use a 1:1 mapping between EL and MMU index
  target-arm: A32: Use get_mem_index for load/stores
  ...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
@@ -243,8 +243,8 @@ S: Maintained
 F: hw/*/exynos*
 Calxeda Highbank
-M: Mark Langsdorf <mark.langsdorf@calxeda.com>
-S: Supported
+M: Rob Herring <robh@kernel.org>
+S: Maintained
 F: hw/arm/highbank.c
 F: hw/net/xgmac.c
......
@@ -620,24 +620,24 @@ static void pxa2xx_palette_parse(PXA2xxLCDState *s, int ch, int bpp)
             src += 2;
             break;
         case 1: /* 16 bpp plus transparency */
-            alpha = *(uint16_t *) src & (1 << 24);
+            alpha = *(uint32_t *) src & (1 << 24);
             if (s->control[0] & LCCR0_CMS)
-                r = g = b = *(uint16_t *) src & 0xff;
+                r = g = b = *(uint32_t *) src & 0xff;
             else {
-                r = (*(uint16_t *) src & 0xf800) >> 8;
-                g = (*(uint16_t *) src & 0x07e0) >> 3;
-                b = (*(uint16_t *) src & 0x001f) << 3;
+                r = (*(uint32_t *) src & 0xf80000) >> 16;
+                g = (*(uint32_t *) src & 0x00fc00) >> 8;
+                b = (*(uint32_t *) src & 0x0000f8);
             }
-            src += 2;
+            src += 4;
             break;
         case 2: /* 18 bpp plus transparency */
             alpha = *(uint32_t *) src & (1 << 24);
             if (s->control[0] & LCCR0_CMS)
                 r = g = b = *(uint32_t *) src & 0xff;
             else {
-                r = (*(uint32_t *) src & 0xf80000) >> 16;
-                g = (*(uint32_t *) src & 0x00fc00) >> 8;
-                b = (*(uint32_t *) src & 0x0000f8);
+                r = (*(uint32_t *) src & 0xfc0000) >> 16;
+                g = (*(uint32_t *) src & 0x00fc00) >> 8;
+                b = (*(uint32_t *) src & 0x0000fc);
             }
             src += 4;
             break;
......
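Aside (not part of the commit): the corrected masks above imply that a "16 bpp plus transparency" palette entry is now read as a 32-bit word, with the transparency flag in bit 24, red in bits [23:19], green in bits [15:10] and blue in bits [7:3], and that src advances by 4 bytes per entry. The standalone C sketch below decodes one such entry using exactly those masks; the struct and function names are made up for illustration.

#include <stdint.h>

struct rgb_entry {
    uint8_t r, g, b;
    uint8_t transparent;
};

/* Hypothetical helper: decode one format-1 (16 bpp + transparency)
 * palette entry, following the field layout of the corrected masks.
 */
static struct rgb_entry decode_pal_format1(uint32_t entry)
{
    struct rgb_entry e;

    e.transparent = (entry >> 24) & 1;   /* alpha/transparency bit     */
    e.r = (entry & 0xf80000) >> 16;      /* 5 red bits land in [7:3]   */
    e.g = (entry & 0x00fc00) >> 8;       /* 6 green bits land in [7:2] */
    e.b = (entry & 0x0000f8);            /* 5 blue bits land in [7:3]  */
    return e;
}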
@@ -143,7 +143,7 @@ typedef struct CPUARMState {
     uint32_t spsr;
     /* Banked registers.  */
-    uint64_t banked_spsr[6];
+    uint64_t banked_spsr[8];
     uint32_t banked_r13[6];
     uint32_t banked_r14[6];
@@ -162,8 +162,8 @@ typedef struct CPUARMState {
     uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
     uint64_t daif; /* exception masks, in the bits they are in in PSTATE */
-    uint64_t elr_el1; /* AArch64 ELR_EL1 */
-    uint64_t sp_el[2]; /* AArch64 banked stack pointers */
+    uint64_t elr_el[4]; /* AArch64 exception link regs  */
+    uint64_t sp_el[4]; /* AArch64 banked stack pointers */
     /* System control coprocessor (cp15) */
     struct {
@@ -185,7 +185,7 @@ typedef struct CPUARMState {
         uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
         uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
         uint32_t ifsr_el2; /* Fault status registers.  */
-        uint64_t esr_el1;
+        uint64_t esr_el[2];
         uint32_t c6_region[8]; /* MPU base/size registers.  */
         uint64_t far_el1; /* Fault address registers.  */
         uint64_t par_el1;  /* Translation result. */
@@ -198,7 +198,7 @@ typedef struct CPUARMState {
         uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint32_t c9_pminten; /* perf monitor interrupt enables */
        uint64_t mair_el1;
-        uint64_t c12_vbar; /* vector base address register */
+        uint64_t vbar_el[4]; /* vector base address register */
        uint32_t c13_fcse; /* FCSE PID.  */
        uint64_t contextidr_el1; /* Context ID.  */
        uint64_t tpidr_el0; /* User RW Thread register.  */
@@ -563,7 +563,9 @@ enum arm_cpu_mode {
     ARM_CPU_MODE_FIQ = 0x11,
     ARM_CPU_MODE_IRQ = 0x12,
     ARM_CPU_MODE_SVC = 0x13,
+    ARM_CPU_MODE_MON = 0x16,
     ARM_CPU_MODE_ABT = 0x17,
+    ARM_CPU_MODE_HYP = 0x1a,
     ARM_CPU_MODE_UND = 0x1b,
     ARM_CPU_MODE_SYS = 0x1f
 };
@@ -631,6 +633,8 @@ enum arm_features {
     ARM_FEATURE_CBAR, /* has cp15 CBAR */
     ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
     ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
+    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
+    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
 };
 static inline int arm_feature(CPUARMState *env, int feature)
@@ -1080,12 +1084,12 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
 #define cpu_list arm_cpu_list
 /* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE0_SUFFIX _user
+#define MMU_MODE1_SUFFIX _kernel
+#define MMU_USER_IDX 0
 static inline int cpu_mmu_index (CPUARMState *env)
 {
-    return arm_current_pl(env) ? 0 : 1;
+    return arm_current_pl(env);
 }
 #include "exec/cpu-all.h"
......
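Aside (illustration only): the cpu.h hunk above swaps the MMU mode suffixes and makes the MMU index equal to the current privilege level, so index 0 is now the user/EL0 regime and index 1 the kernel/EL1 regime. A minimal sketch of the old and new mappings, assuming only PL0 and PL1 as in this series; the helper names are hypothetical.

/* Sketch of the index change introduced above. */
static int mmu_index_old(int current_pl)
{
    return current_pl ? 0 : 1;   /* 0 = _kernel, 1 = _user, MMU_USER_IDX = 1 */
}

static int mmu_index_new(int current_pl)
{
    return current_pl;           /* 0 = _user, 1 = _kernel, MMU_USER_IDX = 0 */
}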
@@ -443,7 +443,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    target_ulong addr = env->cp15.c12_vbar;
+    target_ulong addr = env->cp15.vbar_el[1];
     int i;
     if (arm_current_pl(env) == 0) {
@@ -464,7 +464,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
                       env->exception.syndrome);
     }
-    env->cp15.esr_el1 = env->exception.syndrome;
+    env->cp15.esr_el[1] = env->exception.syndrome;
     env->cp15.far_el1 = env->exception.vaddress;
     switch (cs->exception_index) {
@@ -488,16 +488,16 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
     }
     if (is_a64(env)) {
-        env->banked_spsr[0] = pstate_read(env);
+        env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env);
         env->sp_el[arm_current_pl(env)] = env->xregs[31];
         env->xregs[31] = env->sp_el[1];
-        env->elr_el1 = env->pc;
+        env->elr_el[1] = env->pc;
     } else {
         env->banked_spsr[0] = cpsr_read(env);
         if (!env->thumb) {
-            env->cp15.esr_el1 |= 1 << 25;
+            env->cp15.esr_el[1] |= 1 << 25;
         }
-        env->elr_el1 = env->regs[15];
+        env->elr_el[1] = env->regs[15];
         for (i = 0; i < 15; i++) {
             env->xregs[i] = env->regs[i];
......
@@ -477,11 +477,35 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
-    if (env->cp15.c1_coproc != value) {
-        env->cp15.c1_coproc = value;
-        /* ??? Is this safe when called from within a TB?  */
-        tb_flush(env);
+    uint32_t mask = 0;
+    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
+    if (!arm_feature(env, ARM_FEATURE_V8)) {
+        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
+         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
+         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
+         */
+        if (arm_feature(env, ARM_FEATURE_VFP)) {
+            /* VFP coprocessor: cp10 & cp11 [23:20] */
+            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
+            if (!arm_feature(env, ARM_FEATURE_NEON)) {
+                /* ASEDIS [31] bit is RAO/WI */
+                value |= (1 << 31);
+            }
+            /* VFPv3 and upwards with NEON implement 32 double precision
+             * registers (D0-D31).
+             */
+            if (!arm_feature(env, ARM_FEATURE_NEON) ||
+                !arm_feature(env, ARM_FEATURE_VFP3)) {
+                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
+                value |= (1 << 30);
+            }
+        }
+        value &= mask;
     }
+    env->cp15.c1_coproc = value;
 }
 static const ARMCPRegInfo v6_cp_reginfo[] = {
@@ -657,7 +681,7 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
      */
-    env->cp15.c12_vbar = value & ~0x1FULL;
+    raw_write(env, ri, value & ~0x1FULL);
 }
 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -766,7 +790,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
     { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .writefn = vbar_write,
-      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
+      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
       .resetvalue = 0 },
     { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
@@ -1452,7 +1476,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
       .resetfn = arm_cp_reset_ignore, },
     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
       .access = PL1_RW,
@@ -1460,7 +1484,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW,
-      .fieldoffset = offsetof(CPUARMState, cp15.esr_el1), .resetvalue = 0, },
+      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
@@ -1521,7 +1545,7 @@ static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static const ARMCPRegInfo omap_cp_reginfo[] = {
     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
-      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
       .resetvalue = 0, },
     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .type = ARM_CP_NOP },
@@ -2055,7 +2079,8 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
       .type = ARM_CP_NO_MIGRATE,
       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
-      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, elr_el1) },
+      .access = PL1_RW,
+      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
       .type = ARM_CP_NO_MIGRATE,
       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
@@ -2076,6 +2101,51 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
     REGINFO_SENTINEL
 };
+/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
+static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
+    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+      .access = PL2_RW,
+      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+    REGINFO_SENTINEL
+};
+static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
+    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
+      .type = ARM_CP_NO_MIGRATE,
+      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
+      .access = PL2_RW,
+      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
+    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
+      .type = ARM_CP_NO_MIGRATE,
+      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
+      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
+    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+      .access = PL2_RW, .writefn = vbar_write,
+      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
+      .resetvalue = 0 },
+    REGINFO_SENTINEL
+};
+static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
+    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
+      .type = ARM_CP_NO_MIGRATE,
+      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
+      .access = PL3_RW,
+      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
+    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
+      .type = ARM_CP_NO_MIGRATE,
+      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
+      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
+    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
+      .access = PL3_RW, .writefn = vbar_write,
+      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
+      .resetvalue = 0 },
+    REGINFO_SENTINEL
+};
 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
 {
@@ -2327,6 +2397,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         define_arm_cp_regs(cpu, v8_cp_reginfo);
         define_aarch64_debug_regs(cpu);
     }
+    if (arm_feature(env, ARM_FEATURE_EL2)) {
+        define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
+    } else {
+        /* If EL2 is missing but higher ELs are enabled, we need to
+         * register the no_el2 reginfos.
+         */
+        if (arm_feature(env, ARM_FEATURE_EL3)) {
+            define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo);
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        define_arm_cp_regs(cpu, v8_el3_cp_reginfo);
+    }
     if (arm_feature(env, ARM_FEATURE_MPU)) {
         /* These are the MPU registers prior to PMSAv6.  Any new
          * PMSA core later than the ARM946 will require that we
@@ -3083,6 +3166,10 @@ int bank_number(int mode)
         return 4;
     case ARM_CPU_MODE_FIQ:
         return 5;
+    case ARM_CPU_MODE_HYP:
+        return 6;
+    case ARM_CPU_MODE_MON:
+        return 7;
     }
     hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
 }
@@ -3337,11 +3424,11 @@ void arm_cpu_do_interrupt(CPUState *cs)
         offset = 4;
         break;
     case EXCP_DATA_ABORT:
-        env->cp15.esr_el1 = env->exception.fsr;
+        env->cp15.esr_el[1] = env->exception.fsr;
         env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
                                       env->exception.vaddress);
         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
-                      (uint32_t)env->cp15.esr_el1,
+                      (uint32_t)env->cp15.esr_el[1],
                       (uint32_t)env->exception.vaddress);
         new_mode = ARM_CPU_MODE_ABT;
         addr = 0x10;
@@ -3378,7 +3465,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
-        addr += env->cp15.c12_vbar;
+        addr += env->cp15.vbar_el[1];
     }
     switch_mode (env, new_mode);
     env->spsr = cpsr_read(env);
......
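Aside (worked example, not part of the commit): with the new cpacr_write() above, a write on an ARMv7 CPU that has VFP3 but no NEON keeps only the cp10/cp11 access bits [23:20] and forces ASEDIS [31] and D32DIS [30] to 1; every other bit reads back as zero. A self-contained sketch of that arithmetic, with a hypothetical helper name and test values:

#include <assert.h>
#include <stdint.h>

/* Mirror of the masking above for the ARMv7 + VFP3, no-NEON case. */
static uint32_t cpacr_v7_vfp3_no_neon(uint32_t value)
{
    uint32_t mask = (1u << 31) | (1u << 30) | (0xfu << 20); /* ASEDIS, D32DIS, cp10/cp11 */

    value |= (1u << 31);   /* ASEDIS is RAO/WI without NEON              */
    value |= (1u << 30);   /* D32DIS is RAO/WI: only D0-D15 implemented  */
    return value & mask;   /* everything else is RAZ/WI                  */
}

int main(void)
{
    /* Guest enables full cp10/cp11 access (bits [23:20] = 0xf). */
    assert(cpacr_v7_vfp3_no_neon(0x00f00000) == 0xc0f00000);
    /* Writes to unrelated bits are discarded. */
    assert(cpacr_v7_vfp3_no_neon(0x0000ffff) == 0xc0000000);
    return 0;
}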
@@ -75,6 +75,20 @@ static inline void arm_log_exception(int idx)
  */
 #define GTIMER_SCALE 16
+/*
+ * For AArch64, map a given EL to an index in the banked_spsr array.
+ */
+static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
+{
+    static const unsigned int map[4] = {
+        [1] = 0, /* EL1.  */
+        [2] = 6, /* EL2.  */
+        [3] = 7, /* EL3.  */
+    };
+    assert(el >= 1 && el <= 3);
+    return map[el];
+}
 int bank_number(int mode);
 void switch_mode(CPUARMState *, int);
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
@@ -93,6 +107,7 @@ int arm_rmode_to_sf(int rmode);
 static inline void update_spsel(CPUARMState *env, uint32_t imm)
 {
+    unsigned int cur_el = arm_current_pl(env);
     /* Update PSTATE SPSel bit; this requires us to update the
      * working stack pointer in xregs[31].
      */
@@ -101,17 +116,17 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
     }
     env->pstate = deposit32(env->pstate, 0, 1, imm);
-    /* EL0 has no access rights to update SPSel, and this code
-     * assumes we are updating SP for EL1 while running as EL1.
+    /* We rely on illegal updates to SPsel from EL0 to get trapped
+     * at translation time.
      */
-    assert(arm_current_pl(env) == 1);
+    assert(cur_el >= 1 && cur_el <= 3);
     if (env->pstate & PSTATE_SP) {
         /* Switch from using SP_EL0 to using SP_ELx */
         env->sp_el[0] = env->xregs[31];
-        env->xregs[31] = env->sp_el[1];
+        env->xregs[31] = env->sp_el[cur_el];
     } else {
         /* Switch from SP_EL0 to SP_ELx */
-        env->sp_el[1] = env->xregs[31];
+        env->sp_el[cur_el] = env->xregs[31];
         env->xregs[31] = env->sp_el[0];
     }
 }
......
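Aside (consistency sketch): banked_spsr grows from 6 to 8 entries because SPSR_EL2 and SPSR_EL3 reuse the AArch32 Hyp and Monitor bank slots. The helper above maps EL2 to index 6 and EL3 to index 7, which has to agree with the new ARM_CPU_MODE_HYP and ARM_CPU_MODE_MON cases added to bank_number() in the helper.c hunk. A standalone mirror of that mapping, with illustrative names and values taken from the diffs:

#include <assert.h>

static unsigned int spsr_index_for_el(unsigned int el)
{
    static const unsigned int map[4] = { [1] = 0, [2] = 6, [3] = 7 };
    assert(el >= 1 && el <= 3);
    return map[el];
}

int main(void)
{
    assert(spsr_index_for_el(2) == 6);  /* SPSR_EL2 shares the Hyp bank slot */
    assert(spsr_index_for_el(3) == 7);  /* SPSR_EL3 shares the Mon bank slot */
    return 0;
}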
@@ -161,7 +161,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
     }
     reg.id = AARCH64_CORE_REG(elr_el1);
-    reg.addr = (uintptr_t) &env->elr_el1;
+    reg.addr = (uintptr_t) &env->elr_el[1];
     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
     if (ret) {
         return ret;
@@ -241,7 +241,7 @@ int kvm_arch_get_registers(CPUState *cs)
     }
     reg.id = AARCH64_CORE_REG(elr_el1);
-    reg.addr = (uintptr_t) &env->elr_el1;
+    reg.addr = (uintptr_t) &env->elr_el[1];
     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
     if (ret) {
         return ret;
......
@@ -218,8 +218,8 @@ static int cpu_post_load(void *opaque, int version_id)
 const VMStateDescription vmstate_arm_cpu = {
     .name = "cpu",
-    .version_id = 17,
-    .minimum_version_id = 17,
+    .version_id = 20,
+    .minimum_version_id = 20,
     .pre_save = cpu_pre_save,
     .post_load = cpu_post_load,
     .fields = (VMStateField[]) {
@@ -233,13 +233,13 @@ const VMStateDescription vmstate_arm_cpu = {
             .offset = 0,
         },
         VMSTATE_UINT32(env.spsr, ARMCPU),
-        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 6),
+        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
         VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 6),
         VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 6),
         VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
         VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
-        VMSTATE_UINT64(env.elr_el1, ARMCPU),
-        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 2),
+        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
+        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
         /* The length-check must come before the arrays to avoid
          * incoming data possibly overflowing the array.
          */
......
@@ -386,11 +386,13 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
 void HELPER(exception_return)(CPUARMState *env)
 {
-    uint32_t spsr = env->banked_spsr[0];
+    int cur_el = arm_current_pl(env);
+    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
+    uint32_t spsr = env->banked_spsr[spsr_idx];
     int new_el, i;
     if (env->pstate & PSTATE_SP) {
-        env->sp_el[1] = env->xregs[31];
+        env->sp_el[cur_el] = env->xregs[31];
     } else {
         env->sp_el[0] = env->xregs[31];
     }
@@ -398,6 +400,7 @@ void HELPER(exception_return)(CPUARMState *env)
     env->exclusive_addr = -1;
     if (spsr & PSTATE_nRW) {
+        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
@@ -406,11 +409,14 @@ void HELPER(exception_return)(CPUARMState *env)
             env->regs[i] = env->xregs[i];
         }
-        env->regs[15] = env->elr_el1 & ~0x1;
+        env->regs[15] = env->elr_el[1] & ~0x1;
     } else {
         new_el = extract32(spsr, 2, 2);
-        if (new_el > 1) {
-            /* Return to unimplemented EL */
+        if (new_el > cur_el
+            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+            /* Disallow return to an EL which is unimplemented or higher
+             * than the current one.
+             */
             goto illegal_return;
         }
         if (extract32(spsr, 1, 1)) {
@@ -424,7 +430,7 @@ void HELPER(exception_return)(CPUARMState *env)
         env->aarch64 = 1;
         pstate_write(env, spsr);
         env->xregs[31] = env->sp_el[new_el];
-        env->pc = env->elr_el1;
+        env->pc = env->elr_el[cur_el];
     }
     return;
@@ -438,7 +444,7 @@ illegal_return:
      * no change to exception level, execution state or stack pointer
      */
     env->pstate |= PSTATE_IL;
-    env->pc = env->elr_el1;
+    env->pc = env->elr_el[cur_el];
     spsr &= PSTATE_NZCV | PSTATE_DAIF;
     spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
     pstate_write(env, spsr);
......
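Aside (sketch): the exception_return hunk above rejects an ERET whose SPSR-encoded target EL is higher than the current EL, or whose target is EL2 on a CPU without ARM_FEATURE_EL2; ERET at EL0 never reaches the helper, because the translate-a64.c change below makes it an unallocated encoding at translation time. A standalone restatement of that check, with a hypothetical function name:

#include <stdbool.h>

/* The target EL comes from SPSR bits [3:2], as in the hunk above. */
static bool eret_target_allowed(unsigned int cur_el, unsigned int new_el,
                                bool have_el2)
{
    if (new_el > cur_el) {
        return false;               /* cannot return to a higher EL */
    }
    if (new_el == 2 && !have_el2) {
        return false;               /* EL2 not implemented on this CPU */
    }
    return true;
}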
@@ -162,15 +162,6 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
     }
 }
-static int get_mem_index(DisasContext *s)
-{
-#ifdef CONFIG_USER_ONLY
-    return 1;
-#else
-    return s->user;
-#endif
-}
 void gen_a64_set_pc_im(uint64_t val)
 {
     tcg_gen_movi_i64(cpu_pc, val);
@@ -1516,6 +1507,10 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
         break;
     case 4: /* ERET */
+        if (s->current_pl == 0) {
+            unallocated_encoding(s);
+            return;
+        }
         gen_helper_exception_return(cpu_env);
         s->is_jmp = DISAS_JUMP;
         return;
......
This diff has been collapsed.
@@ -52,6 +52,11 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
     return (dc->features & (1ULL << feature)) != 0;
 }
+static inline int get_mem_index(DisasContext *s)
+{
+    return s->current_pl;
+}
 /* target-specific extra values for is_jmp */
 /* These instructions trap after executing, so the A32/T32 decoder must
  * defer them until after the conditional execution state has been updated.
......