Commit 765bdb40 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "KVM-ARM fixes, mostly coming from the PMU work"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  arm64: KVM: Fix guest dead loop when register accessor returns false
  arm64: KVM: Fix comments of the CP handler
  arm64: KVM: Fix wrong use of the CPSR MODE mask for 32bit guests
  arm64: KVM: Obey RES0/1 reserved bits when setting CPTR_EL2
  arm64: KVM: Fix AArch64 guest userspace exception injection
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -182,6 +182,7 @@
 #define CPTR_EL2_TCPAC	(1 << 31)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA	(1 << 11)
...
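Note: CPTR_EL2_DEFAULT is the reset-safe value for this register: bits [13:12] and [9:0] of CPTR_EL2 are RES1, and every other bit (including the trap bits) defaults to clear. The arithmetic checks out: (0x3 << 12) | 0x3ff = 0x3000 | 0x03ff = 0x33ff. A standalone C11 sanity check of that arithmetic (the RES1 helper names are invented for the demo, not kernel API):

	#include <assert.h>

	#define CPTR_EL2_RES1_9_0	0x3ffU		/* bits [9:0] reset to 1 */
	#define CPTR_EL2_RES1_13_12	(0x3U << 12)	/* bits [13:12] reset to 1 */
	#define CPTR_EL2_DEFAULT	0x000033ffU

	/* compile-time check (build with -std=c11) that the default is
	 * exactly the union of the RES1 fields */
	static_assert((CPTR_EL2_RES1_13_12 | CPTR_EL2_RES1_9_0) == CPTR_EL2_DEFAULT,
		      "CPTR_EL2_DEFAULT must equal the RES1 bits");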
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }
...
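Note: this is the "wrong use of the CPSR MODE mask for 32bit guests" fix from the merge summary. An AArch32 mode field is five bits wide (USR is 0x10, SVC is 0x13), while the AArch64 PSR_MODE_MASK keeps only the low four bits, so masking first dropped bit 4 and the privilege test misfired for every 32-bit mode. A minimal standalone sketch of the truncation (mask values copied from the arm64 headers):

	#include <stdio.h>

	#define PSR_MODE_MASK		0x0000000f	/* AArch64: 4-bit M field */
	#define COMPAT_PSR_MODE_MASK	0x0000001f	/* AArch32: 5-bit M field */
	#define COMPAT_PSR_MODE_USR	0x00000010
	#define COMPAT_PSR_MODE_SVC	0x00000013

	int main(void)
	{
		unsigned int cpsr = COMPAT_PSR_MODE_SVC;	/* privileged 32-bit guest */

		/* old code: 0x13 & 0xf == 0x3, not > 0x10 -> wrongly "unprivileged" */
		printf("old: %d\n", (cpsr & PSR_MODE_MASK) > COMPAT_PSR_MODE_USR);
		/* fixed: 0x13 & 0x1f == 0x13, > 0x10 -> correctly "privileged" */
		printf("new: %d\n", (cpsr & COMPAT_PSR_MODE_MASK) > COMPAT_PSR_MODE_USR);
		return 0;
	}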
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(HCR_RW, hcr_el2);
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(0, cptr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
...
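Note: the same RES0/1 rule applies on the exit path: writing plain 0 to cptr_el2 would clear the RES1 bits, so __deactivate_traps() now restores CPTR_EL2_DEFAULT instead. The composition is small enough to sketch as pure functions (hypothetical helper names standing in for the write_sysreg() calls above):

	#include <stdio.h>

	#define CPTR_EL2_DEFAULT	0x000033ffU	/* RES1 bits [13:12], [9:0] */
	#define CPTR_EL2_TTA		(1U << 20)	/* trap trace accesses */
	#define CPTR_EL2_TFP		(1U << 10)	/* trap FP/SIMD accesses */

	/* guest entry: the default plus the traps wanted while the guest runs */
	static unsigned int cptr_guest_value(void)
	{
		return CPTR_EL2_DEFAULT | CPTR_EL2_TTA | CPTR_EL2_TFP;
	}

	/* guest exit: no traps, but the RES1 bits must remain set */
	static unsigned int cptr_host_value(void)
	{
		return CPTR_EL2_DEFAULT;
	}

	int main(void)
	{
		printf("enter: 0x%08x exit: 0x%08x\n",
		       cptr_guest_value(), cptr_host_value());
		return 0;
	}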
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -27,7 +27,11 @@
 
 #define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
 				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
...
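Note: this follows the AArch64 vector table layout. An entry group is selected by where the exception came from (current EL with SP_EL0 at 0x0, current EL with SP_ELx at 0x200, lower EL in AArch64 at 0x400, lower EL in AArch32 at 0x600), and within a group the entry is selected by type (sync 0x0, IRQ 0x80, FIQ 0x100, SError 0x180). The old code hardcoded VBAR_EL1 + 0x200, which is only right for a guest at EL1h; a guest in EL0t must land at VBAR_EL1 + 0x400. Also note the ordering: get_except_vector() reads the guest CPSR to pick the group, so the PC is now set before CPSR is overwritten with PSTATE_FAULT_BITS_64. A worked example of the address computation (standalone C, the VBAR value is invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	/* offsets as in the patch above */
	#define LOWER_EL_AArch64_VECTOR	0x400

	enum exception_type { except_type_sync = 0, except_type_irq = 0x80 };

	int main(void)
	{
		uint64_t vbar_el1 = 0xffff000008082000ULL;	/* hypothetical VBAR_EL1 */

		/* guest was in EL0t: lower-EL AArch64 group, sync entry */
		printf("vector: 0x%llx\n",
		       (unsigned long long)(vbar_el1 + LOWER_EL_AArch64_VECTOR +
					    except_type_sync));
		return 0;
	}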
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-
-			/* Handled */
-			return 0;
 		}
+		/* Handled */
+		return 0;
 	}
 
 	/* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run: The kvm_run struct
  */
@@ -1095,7 +1094,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run: The kvm_run struct
  */
...
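Note on the dead-loop fix: the accessor contract appears to be that returning true means "emulated, skip the trapped instruction", while returning false means "an exception was already injected into the guest, so its PC has been redirected"; in both cases the trap is handled and emulate_cp() should return 0. Previously a false return fell through to the not-handled path and the guest could keep re-taking the same trap. A toy model of the fixed control flow (hypothetical names, not kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu { unsigned long pc; };

	/* models an accessor that refuses the access and injects an
	 * exception, redirecting the guest PC to its vector table */
	static bool access_denied(struct vcpu *v)
	{
		v->pc = 0x400;
		return false;
	}

	static int emulate_cp(struct vcpu *v)
	{
		if (access_denied(v))
			v->pc += 4;	/* kvm_skip_instr() equivalent */
		/* handled either way: the guest makes forward progress */
		return 0;
	}

	int main(void)
	{
		struct vcpu v = { .pc = 0x1000 };
		emulate_cp(&v);
		printf("resume at 0x%lx\n", v.pc);	/* 0x400, not 0x1000 */
		return 0;
	}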