Commit 8052f0b2 authored by Marc Zyngier, committed by Zheng Zengkai

KVM: arm64: Ensure I-cache isolation between vcpus of a same VM

stable inclusion
from stable-5.10.24
commit e7afadd0dbe2e1e9971fac5b64ff3e592bfade79
bugzilla: 51348

--------------------------------

Commit 01dc9262 upstream.

It recently became apparent that the ARMv8 architecture has interesting
rules regarding the attributes used when fetching instructions
while the MMU is off at Stage-1.

In this situation, the CPU is allowed to fetch from the PoC and
allocate into the I-cache (unless the memory is mapped with
the XN attribute at Stage-2).

If we transpose this to vcpus sharing a single physical CPU,
it is possible for a vcpu running with its MMU off to influence
another vcpu running with its MMU on, as the latter is expected to
fetch from the PoU (and self-patching code doesn't flush below that
level).
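
For context, the conventional arm64 self-patching sequence only operates
to the PoU, which is why such PoC-allocated I-cache lines are never
evicted by it. A minimal illustrative sketch (the helper name here is
hypothetical; the kernel's real interface for this is
__flush_icache_range()):

	/*
	 * Illustrative only: publish a patched instruction by cleaning
	 * the D-cache and invalidating the I-cache to the PoU. A stale
	 * I-cache line allocated from the PoC by an MMU-off vcpu is
	 * untouched by this sequence.
	 */
	static inline void sync_patched_insn(unsigned long addr)
	{
		asm volatile(
		"	dc	cvau, %0\n"	/* clean D-cache line to PoU */
		"	dsb	ish\n"
		"	ic	ivau, %0\n"	/* invalidate I-cache line to PoU */
		"	dsb	ish\n"
		"	isb\n"
		: : "r" (addr) : "memory");
	}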

In order to solve this, reuse the vcpu-private TLB invalidation
code to apply the same policy to the I-cache, nuking it every time
the vcpu runs on a physical CPU that ran another vcpu of the same
VM in the past.
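
Condensed from the arch/arm64/kvm/arm.c hunk below, the vcpu-load path
that drives this policy looks as follows (unrelated load logic trimmed):

	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
		int *last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

		/*
		 * A different vcpu of the same VM ran last on this
		 * physical CPU: nuke its TLBs and I-cache. Getting
		 * preempted before actually running only causes
		 * harmless over-invalidation.
		 */
		if (*last_ran != vcpu->vcpu_id) {
			kvm_call_hyp(__kvm_flush_cpu_context, mmu);
			*last_ran = vcpu->vcpu_id;
		}
	}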

This involves renaming __kvm_tlb_flush_local_vmid() to
__kvm_flush_cpu_context(), and inserting a local I-cache invalidation
there.
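
Assembled from the tlb.c hunks below, the renamed helper ends up with
the same shape in both the nVHE and VHE copies; the "ic iallu" line is
the only addition:

	void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
	{
		struct tlb_inv_context cxt;

		/* Operate in the guest's VMID/stage-2 context. */
		__tlb_switch_to_guest(mmu, &cxt);

		__tlbi(vmalle1);	  /* local stage 1+2 TLB invalidation */
		asm volatile("ic iallu"); /* local I-cache invalidation */
		dsb(nsh);
		isb();

		__tlb_switch_to_host(&cxt);
	}

Note the non-shareable barrier (dsb(nsh)): the invalidation is
deliberately local to the physical CPU, matching the per-CPU
last_vcpu_ran tracking above.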

Cc: stable@vger.kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210303164505.68492-1-maz@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Acked-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 35ccf255
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -49,7 +49,7 @@
 #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
 #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
 #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
@@ -180,10 +180,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				      int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
...
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -366,11 +366,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

 	/*
+	 * We guarantee that both TLBs and I-cache are private to each
+	 * vcpu. If detecting that a vcpu from the same VM has
+	 * previously run on the same physical CPU, call into the
+	 * hypervisor code to nuke the relevant contexts.
+	 *
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
 		*last_ran = vcpu->vcpu_id;
 	}
...
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -46,11 +46,11 @@ static void handle_host_hcall(unsigned long func_id,
 		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
 		break;
 	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
+	case KVM_HOST_SMCCC_FUNC(__kvm_flush_cpu_context): {
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;

-		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+		__kvm_flush_cpu_context(kern_hyp_va(mmu));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
...
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;

@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 	__tlb_switch_to_guest(mmu, &cxt);

 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
...
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;

@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 	__tlb_switch_to_guest(mmu, &cxt);

 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
...