Commit 43c7812c authored by Paolo Bonzini, committed by Zheng Zengkai

KVM: x86/mmu: do compare-and-exchange of gPTE via the user address

stable inclusion
from stable-v5.10.110
commit e90518d10c7dd59d5ebbe25b0f0083a7dbffa42f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5869N
CVE: CVE-2022-1158

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=e90518d10c7d

--------------------------------

commit 2a8859f3 upstream.

FNAME(cmpxchg_gpte) is an inefficient mess.  It is at least decent if it
can go through get_user_pages_fast(), but if it cannot then it tries to
use memremap(); that is not just terribly slow, it is also wrong because
it assumes that the VM_PFNMAP VMA is contiguous.
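
For illustration, the difference between the broken assumption and a correct page-table walk looks roughly like this (a hedged sketch, not part of the patch; follow_pte()'s exact signature has varied across kernel releases):

    /*
     * Broken (the removed path in the diff below): derive the pfn by
     * linear arithmetic over the VMA.  This only holds if the whole
     * VM_PFNMAP range was established by a single remap_pfn_range().
     */
    pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

    /*
     * Correct (what hva_to_pfn_remapped() does): walk the page tables,
     * since a driver may have installed unrelated pfns page by page
     * with vmf_insert_pfn().
     */
    pte_t *ptep;
    spinlock_t *ptl;

    if (!follow_pte(vma->vm_mm, vaddr, &ptep, &ptl)) {
            pfn = pte_pfn(*ptep);   /* the pfn actually mapped at vaddr */
            pte_unmap_unlock(ptep, ptl);
    }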

The right way to do it would be to do the same thing as
hva_to_pfn_remapped() does since commit add6a0cd ("KVM: MMU: try to
fix up page faults before giving up", 2016-07-05), using follow_pte()
and fixup_user_fault() to determine the correct address to use for
memremap().  To do this, one could for example extract hva_to_pfn()
for use outside virt/kvm/kvm_main.c.  But really there is no reason to
do that either, because there is already a perfectly valid address to
do the cmpxchg() on, only it is a userspace address.  That means doing
user_access_begin()/user_access_end() and writing the code in assembly
to handle exceptions correctly.  Worse, the guest PTE can be 8-byte
even on i686 so there is the extra complication of using cmpxchg8b to
account for.  But at least it is an efficient mess.
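
In its simplest form, that access pattern looks like this (a minimal sketch using the generic unsafe_get_user() helper; the actual patch below open-codes the cmpxchg in inline asm instead):

    static int read_gpte_user(pt_element_t __user *ptep_user, pt_element_t *pte)
    {
            /* Validate the range and open the user-access window (STAC). */
            if (!user_access_begin(ptep_user, sizeof(*pte)))
                    return -EFAULT;
            /* A fault here jumps to the err label via the exception table. */
            unsafe_get_user(*pte, ptep_user, err);
            user_access_end();      /* close the window again (CLAC) */
            return 0;
    err:
            user_access_end();
            return -EFAULT;
    }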

(Thanks to Linus for suggesting improvement on the inline assembly).
Reported-by: Qiuhao Li <qiuhao@sysec.org>
Reported-by: Gaoning Pan <pgn@zju.edu.cn>
Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Reported-by: syzbot+6cde2282daa792c49ab8@syzkaller.appspotmail.com
Debugged-by: Tadeusz Struk <tadeusz.struk@linaro.org>
Tested-by: Maxim Levitsky <mlevitsk@redhat.com>
Cc: stable@vger.kernel.org
Fixes: bd53cb35 ("X86/KVM: Handle PFNs outside of kernel reach when touching GPTEs")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent d1e2a690
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -34,9 +34,8 @@
 #define PT_HAVE_ACCESSED_DIRTY(mmu) true
 #ifdef CONFIG_X86_64
 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
-#define CMPXCHG cmpxchg
+#define CMPXCHG "cmpxchgq"
 #else
-#define CMPXCHG cmpxchg64
 #define PT_MAX_FULL_LEVELS 2
 #endif
 #elif PTTYPE == 32
@@ -52,7 +51,7 @@
 #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
 #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
 #define PT_HAVE_ACCESSED_DIRTY(mmu) true
-#define CMPXCHG cmpxchg
+#define CMPXCHG "cmpxchgl"
 #elif PTTYPE == PTTYPE_EPT
 #define pt_element_t u64
 #define guest_walker guest_walkerEPT
@@ -65,7 +64,9 @@
 #define PT_GUEST_DIRTY_SHIFT 9
 #define PT_GUEST_ACCESSED_SHIFT 8
 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
-#define CMPXCHG cmpxchg64
+#ifdef CONFIG_X86_64
+#define CMPXCHG "cmpxchgq"
+#endif
 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
 #else
 #error Invalid PTTYPE value
@@ -147,43 +148,39 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
 {
-	int npages;
-	pt_element_t ret;
-	pt_element_t *table;
-	struct page *page;
+	int r = -EFAULT;
 
-	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
-	if (likely(npages == 1)) {
-		table = kmap_atomic(page);
-		ret = CMPXCHG(&table[index], orig_pte, new_pte);
-		kunmap_atomic(table);
-
-		kvm_release_page_dirty(page);
-	} else {
-		struct vm_area_struct *vma;
-		unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
-		unsigned long pfn;
-		unsigned long paddr;
-
-		mmap_read_lock(current->mm);
-		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
-		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-			mmap_read_unlock(current->mm);
-			return -EFAULT;
-		}
-		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-		paddr = pfn << PAGE_SHIFT;
-		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
-		if (!table) {
-			mmap_read_unlock(current->mm);
-			return -EFAULT;
-		}
-		ret = CMPXCHG(&table[index], orig_pte, new_pte);
-		memunmap(table);
-		mmap_read_unlock(current->mm);
-	}
+	if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+		return -EFAULT;
+
+#ifdef CMPXCHG
+	asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+		     "mov $0, %[r]\n"
+		     "setnz %b[r]\n"
+		     "2:"
+		     _ASM_EXTABLE_UA(1b, 2b)
+		     : [ptr] "+m" (*ptep_user),
+		       [old] "+a" (orig_pte),
+		       [r] "+q" (r)
+		     : [new] "r" (new_pte)
+		     : "memory");
+#else
+	asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+		     "movl $0, %[r]\n"
+		     "jz 2f\n"
+		     "incl %[r]\n"
+		     "2:"
+		     _ASM_EXTABLE_UA(1b, 2b)
+		     : [ptr] "+m" (*ptep_user),
+		       [old] "+A" (orig_pte),
+		       [r] "+rm" (r)
+		     : [new_lo] "b" ((u32)new_pte),
+		       [new_hi] "c" ((u32)(new_pte >> 32))
+		     : "memory");
+#endif
 
-	return (ret != orig_pte);
+	user_access_end();
+	return r;
 }
 
 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,