diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 35491af879852dda8b69c85db662757366e91a94..51c9f9836befa8ea446b1faf25630dd4a72bfcb7 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,9 +66,9 @@ extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index c0edd450e10459612e37cc292ad8585494d12773..e8c45d088d0b1c97f0b0fd3829780c3ee7badcfd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -56,7 +56,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ff73f5462aca512a8f19acca00ab41b07bf2e34c..ab8dec4eb3aa834f470a1255db682b0738cbd706 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -63,9 +63,9 @@ extern char __kvm_hyp_init_end[];
 extern char __kvm_hyp_vector[];
 
 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 7fcc9c1a5f45c2c4c73070113adcbc9adf449561..c35e9b99b0c5024275696e46ebf475d13ba424d7 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -149,7 +149,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	__tlb_switch_to_host()(kvm, flags);
 }
 
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 	unsigned long flags;
@@ -158,6 +158,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index cdfe28311f4141d83dc88c18af8109f8cc2ec941..c1ae17f68a05ce0cab028f9982931529b7e706ec 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -424,11 +424,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
+	 * We guarantee that both TLBs and I-cache are private to each
+	 * vcpu. If detecting that a vcpu from the same VM has
+	 * previously run on the same physical CPU, call into the
+	 * hypervisor code to nuke the relevant contexts.
+	 *
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		kvm_call_hyp(__kvm_flush_cpu_context, vcpu);
 		*last_ran = vcpu->vcpu_id;
 	}
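---

Note, not part of the patch: the hazard the added "ic iallu" closes is the
classic one of executing from memory whose contents changed without an
intervening instruction-cache invalidation. When a second vcpu of the same VM
is scheduled onto a physical CPU, it can hit I-cache lines left behind by the
previous vcpu (the I-cache is tagged by VMID, not by vcpu), so the local
I-cache is invalidated alongside the TLBs. The sketch below is a hypothetical
arm64 *userspace* analogy of the same mechanism, not KVM code; the program,
buffer layout, and encoded instructions are illustrative assumptions, and
__builtin___clear_cache (a standard GCC/Clang builtin) plays the role that
"ic iallu" plus the dsb/isb barriers play at EL2.

/* Hypothetical analogy only: stale I-cache after rewriting code. arm64 Linux. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* AArch64 encodings: "mov w0, #1; ret" and "mov w0, #2; ret" */
static const unsigned int ret1[] = { 0x52800020, 0xd65f03c0 };
static const unsigned int ret2[] = { 0x52800040, 0xd65f03c0 };

int main(void)
{
	unsigned int *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	int (*fn)(void) = (int (*)(void))buf;

	memcpy(buf, ret1, sizeof(ret1));
	/* Make the new instructions visible to the instruction stream. */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(ret1));
	printf("first:  %d\n", fn());	/* prints 1 */

	memcpy(buf, ret2, sizeof(ret2));
	/*
	 * Omitting this invalidation risks executing the stale "mov w0, #1"
	 * still sitting in the I-cache, the same way a second vcpu could
	 * execute lines populated by its predecessor without "ic iallu".
	 */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(ret2));
	printf("second: %d\n", fn());	/* prints 2 */

	return 0;
}

The patch's invalidation is coarser: "ic iallu" drops the entire local
I-cache rather than a range, which is the only safe option since the
hypervisor cannot know which guest VAs the previous vcpu executed from.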