Commit cea7bb21, authored by Izik Eidus, committed by Avi Kivity

KVM: MMU: Make gfn_to_page() always safe

In case the page is not present in the guest memory map, return a dummy
page the guest can scribble on.

This simplifies error checking in its users.
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>

Parent 9647c14c
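
For context, the caller-side contract this patch establishes can be sketched as follows. This is an illustrative sketch, not part of the commit: the helper name copy_from_guest_page() and the include lines are assumptions, modeled on kvm_read_guest_page() as modified below. The point it shows: gfn_to_page() now always returns a usable struct page (possibly the shared dummy bad_page), so callers test is_error_page() instead of comparing against NULL.

/*
 * Hedged sketch, not part of this commit: a hypothetical caller,
 * modeled on kvm_read_guest_page() as changed by this patch.
 * Assumes the declarations this patch adds to the KVM header
 * (gfn_to_page(), is_error_page(), bad_page).
 */
#include <linux/highmem.h>      /* kmap_atomic()/kunmap_atomic() */
#include <linux/string.h>       /* memcpy() */
#include "kvm.h"                /* struct kvm, gfn_t, gfn_to_page(), is_error_page() */

static int copy_from_guest_page(struct kvm *kvm, gfn_t gfn, void *data,
                                int offset, int len)
{
        struct page *page;
        void *page_virt;

        /*
         * Old contract: gfn_to_page() could return NULL, so every caller
         * had to NULL-check.  New contract: it always returns a valid
         * page (the zeroed bad_page when the gfn has no memslot), and a
         * caller that wants to fail rather than touch the dummy page
         * asks is_error_page().
         */
        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page))
                return -EFAULT;

        page_virt = kmap_atomic(page, KM_USER0);
        memcpy(data, page_virt + offset, len);
        kunmap_atomic(page_virt, KM_USER0);
        return 0;
}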
@@ -565,8 +565,9 @@ static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
-extern hpa_t bad_page_address;
+extern struct page *bad_page;
 
+int is_error_page(struct page *page);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
...
@@ -993,6 +993,12 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
         return r;
 }
 
+int is_error_page(struct page *page)
+{
+        return page == bad_page;
+}
+EXPORT_SYMBOL_GPL(is_error_page);
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
         int i;
@@ -1034,7 +1040,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
         gfn = unalias_gfn(kvm, gfn);
         slot = __gfn_to_memslot(kvm, gfn);
         if (!slot)
-                return NULL;
+                return bad_page;
         return slot->phys_mem[gfn - slot->base_gfn];
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -1054,7 +1060,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (!page)
+        if (is_error_page(page))
                 return -EFAULT;
         page_virt = kmap_atomic(page, KM_USER0);
@@ -1092,7 +1098,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (!page)
+        if (is_error_page(page))
                 return -EFAULT;
         page_virt = kmap_atomic(page, KM_USER0);
@@ -1130,7 +1136,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (!page)
+        if (is_error_page(page))
                 return -EFAULT;
         page_virt = kmap_atomic(page, KM_USER0);
@@ -3068,7 +3074,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
         page = gfn_to_page(kvm, pgoff);
-        if (!page)
+        if (is_error_page(page))
                 return NOPAGE_SIGBUS;
         get_page(page);
         if (type != NULL)
@@ -3383,7 +3389,7 @@ static struct sys_device kvm_sysdev = {
         .cls = &kvm_sysdev_class,
 };
 
-hpa_t bad_page_address;
+struct page *bad_page;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -3512,7 +3518,6 @@ EXPORT_SYMBOL_GPL(kvm_exit_x86);
 static __init int kvm_init(void)
 {
-        static struct page *bad_page;
         int r;
 
         r = kvm_mmu_module_init();
@@ -3523,16 +3528,13 @@ static __init int kvm_init(void)
         kvm_arch_init();
 
-        bad_page = alloc_page(GFP_KERNEL);
+        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (bad_page == NULL) {
                 r = -ENOMEM;
                 goto out;
         }
 
-        bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
-        memset(__va(bad_page_address), 0, PAGE_SIZE);
-
         return 0;
 
 out:
@@ -3545,7 +3547,7 @@ static __init int kvm_init(void)
 static __exit void kvm_exit(void)
 {
         kvm_exit_debug();
-        __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+        __free_page(bad_page);
         kvm_mmu_module_exit();
 }
...
@@ -850,23 +850,17 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
         __set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
-{
-        hpa_t hpa = gpa_to_hpa(kvm, gpa);
-
-        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
-}
-
 hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
         struct page *page;
+        hpa_t hpa;
 
         ASSERT((gpa & HPA_ERR_MASK) == 0);
         page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
-        if (!page)
-                return gpa | HPA_ERR_MASK;
-        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
-                | (gpa & (PAGE_SIZE-1));
+        hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
+        if (is_error_page(page))
+                return hpa | HPA_ERR_MASK;
+        return hpa;
 }
 
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
...
@@ -72,8 +72,6 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                             struct kvm_vcpu *vcpu, gva_t addr,
                             int write_fault, int user_fault, int fetch_fault)
 {
-        hpa_t hpa;
-        struct kvm_memory_slot *slot;
         struct page *page;
         pt_element_t *table;
         pt_element_t pte;
@@ -105,9 +103,8 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                          walker->level - 1, table_gfn);
-                slot = gfn_to_memslot(vcpu->kvm, table_gfn);
-                hpa = safe_gpa_to_hpa(vcpu->kvm, pte & PT64_BASE_ADDR_MASK);
-                page = pfn_to_page(hpa >> PAGE_SHIFT);
+                page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
+                                   >> PAGE_SHIFT);
 
                 table = kmap_atomic(page, KM_USER0);
                 pte = table[index];
...