Commit cf364e08 authored by Marc Zyngier

KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE

Since TLB invalidation can run in parallel with VMID allocation,
we need to be careful and avoid any sort of load/store tearing.
Use {READ,WRITE}_ONCE consistently to avoid any surprise.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20210806113109.2475-6-will@kernel.org
Parent 4efc0ede
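A minimal userspace sketch (not part of the commit) of the access pattern this patch enforces: one thread publishes a new VMID value with WRITE_ONCE() while another concurrently samples it with READ_ONCE(), so the compiler may not tear, fuse, or re-load either access. The volatile-cast macro definitions and the fake_vmid structure below are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Illustrative sketch: a writer/reader pair on a shared VMID-like field.
 * READ_ONCE/WRITE_ONCE here are simplified volatile-based stand-ins for
 * the kernel macros; struct fake_vmid is a hypothetical placeholder.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

struct fake_vmid {
        uint64_t vmid;          /* stands in for struct kvm_vmid::vmid */
};

static struct fake_vmid vmid_state;

/* Writer side: loosely mirrors update_vmid() publishing a new VMID. */
static void *allocator(void *arg)
{
        (void)arg;
        for (uint64_t next = 1; next < 1000; next++)
                WRITE_ONCE(vmid_state.vmid, next);
        return NULL;
}

/* Reader side: loosely mirrors kvm_get_vttbr() sampling the VMID. */
static void *tlb_path(void *arg)
{
        uint64_t last = 0;

        (void)arg;
        for (int i = 0; i < 1000; i++)
                last = READ_ONCE(vmid_state.vmid);
        printf("last observed vmid: %llu\n", (unsigned long long)last);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, allocator, NULL);
        pthread_create(&r, NULL, tlb_path, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}

Built with something like "gcc -O2 -pthread" this shows the shape of the race: without the _ONCE accessors the compiler would be free to split or repeat the plain load/store, which is exactly what the patch rules out for vmid and vmid_gen.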
...
@@ -252,6 +252,11 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
 
+/*
+ * When this is (directly or indirectly) used on the TLB invalidation
+ * path, we rely on a previously issued DSB so that page table updates
+ * and VMID reads are correctly ordered.
+ */
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
         struct kvm_vmid *vmid = &mmu->vmid;
@@ -259,7 +264,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
         u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
         baddr = mmu->pgd_phys;
-        vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+        vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
         return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
...
@@ -571,7 +571,7 @@ static void update_vmid(struct kvm_vmid *vmid)
                 kvm_call_hyp(__kvm_flush_vm_context);
         }
 
-        vmid->vmid = kvm_next_vmid;
+        WRITE_ONCE(vmid->vmid, kvm_next_vmid);
         kvm_next_vmid++;
         kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
...
@@ -109,8 +109,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
         mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
         mmu->arch = &host_kvm.arch;
         mmu->pgt = &host_kvm.pgt;
-        mmu->vmid.vmid_gen = 0;
-        mmu->vmid.vmid = 0;
+        WRITE_ONCE(mmu->vmid.vmid_gen, 0);
+        WRITE_ONCE(mmu->vmid.vmid, 0);
 
         return 0;
 }
...
@@ -485,7 +485,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
         mmu->arch = &kvm->arch;
         mmu->pgt = pgt;
         mmu->pgd_phys = __pa(pgt->pgd);
-        mmu->vmid.vmid_gen = 0;
+        WRITE_ONCE(mmu->vmid.vmid_gen, 0);
         return 0;
 
 out_destroy_pgtable:
...