diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e3312e22e8db0853fd9935e493e1bc40d40b449a..e13041ac7cdf67bb021b004748beb45c287b549e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -708,9 +708,9 @@ static u64 mark_spte_for_access_track(u64 spte)
 		return spte;
 
 	/*
-	 * Verify that the write-protection that we do below will be fixable
-	 * via the fast page fault path. Currently, that is always the case, at
-	 * least when using EPT (which is when access tracking would be used).
+	 * Making an Access Tracking PTE will result in removal of write access
+	 * from the PTE. So, verify that we will be able to restore the write
+	 * access in the fast page fault path later on.
	 */
 	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
 		  !spte_can_locklessly_be_made_writable(spte),
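
For context, the invariant the new comment documents can be sketched as follows: an SPTE that is writable when it is converted into an Access Tracking PTE must carry the software "writable" markers, so that the fast page fault path can later restore PT_WRITABLE_MASK without taking mmu_lock. The snippet below is a minimal standalone sketch, not the kernel implementation; the bit positions, helper bodies, and mask names other than PT_WRITABLE_MASK and spte_can_locklessly_be_made_writable() are assumptions made for illustration.

/*
 * Illustrative sketch only -- mask bit positions and helper bodies are
 * assumed for the example, not taken from arch/x86/kvm/mmu.c.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK	(1ull << 1)	/* hardware writable bit (assumed layout) */
#define SPTE_HOST_WRITEABLE	(1ull << 57)	/* host allows write (assumed bit) */
#define SPTE_MMU_WRITEABLE	(1ull << 58)	/* MMU allows write (assumed bit) */

/*
 * Write access can only be restored without mmu_lock if both software
 * "writable" markers are still present in the SPTE.
 */
static int spte_can_locklessly_be_made_writable(uint64_t spte)
{
	uint64_t m = SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE;

	return (spte & m) == m;
}

/*
 * Making an Access Tracking PTE strips the hardware writable bit; per the
 * updated comment, this is only safe if the fast page fault path can put
 * it back locklessly later.
 */
static uint64_t mark_for_access_track(uint64_t spte)
{
	if ((spte & PT_WRITABLE_MASK) &&
	    !spte_can_locklessly_be_made_writable(spte))
		fprintf(stderr, "writable SPTE is not locklessly restorable\n");

	return spte & ~PT_WRITABLE_MASK;
}

/* Fast page fault path: restore write access without taking mmu_lock. */
static uint64_t fast_restore_write(uint64_t spte)
{
	if (spte_can_locklessly_be_made_writable(spte))
		spte |= PT_WRITABLE_MASK;

	return spte;
}

int main(void)
{
	uint64_t spte = PT_WRITABLE_MASK | SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE;

	spte = mark_for_access_track(spte);	/* write access removed */
	spte = fast_restore_write(spte);	/* write access restored locklessly */

	printf("writable again: %d\n", !!(spte & PT_WRITABLE_MASK));
	return 0;
}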