Commit 5972e953 authored by Markus Rechberger, committed by Avi Kivity

KVM: Use page_private()/set_page_private() apis

Besides using an established API, this allows using KVM in older kernels.
Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 9d8f549d
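
For context, page_private() and set_page_private() are the kernel's accessors for the page->private field; in mainline they are defined in include/linux/mm.h, approximately as in the sketch below (reproduced for illustration, not part of this commit):

	/* Accessors over struct page's private field, roughly as in
	 * include/linux/mm.h. */
	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))

Going through the accessors instead of touching page->private directly lets the field's representation change, or be shimmed on kernels where it differs, without editing every caller, which is what makes building KVM against older kernels easier.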
@@ -523,7 +523,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
-	return (struct kvm_mmu_page *)page->private;
+	return (struct kvm_mmu_page *)page_private(page);
 }
 
 static inline u16 read_fs(void)
@@ -670,7 +670,7 @@ static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
 						     | __GFP_ZERO);
 			if (!new.phys_mem[i])
 				goto out_free;
-			new.phys_mem[i]->private = 0;
+			set_page_private(new.phys_mem[i],0);
 		}
 	}
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-		page->private = (unsigned long)spte;
-	} else if (!(page->private & 1)) {
+		set_page_private(page,(unsigned long)spte);
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)page->private;
+		desc->shadow_ptes[0] = (u64 *)page_private(page);
 		desc->shadow_ptes[1] = spte;
-		page->private = (unsigned long)desc | 1;
+		set_page_private(page,(unsigned long)desc | 1);
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
 			desc = desc->more;
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
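
The branching above is easier to follow once the encoding of page->private is spelled out: zero means the page has no reverse mappings, a value with the low bit clear is a pointer to the single spte that maps the page, and a value with the low bit set is a pointer to a struct kvm_rmap_desc chain with the tag bit masked off. The helpers below are hypothetical (names invented for illustration, not part of the commit) and merely restate that encoding:

	/* Hypothetical helpers restating the rmap tagged-pointer encoding. */
	static inline int rmap_is_desc(struct page *page)
	{
		return page_private(page) & 1;	/* low bit set: desc chain */
	}

	static inline struct kvm_rmap_desc *rmap_desc(struct page *page)
	{
		/* Valid only when rmap_is_desc(page); mask off the tag bit. */
		return (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
	}

The same three-way test reappears in rmap_remove() and rmap_write_protect() below.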
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		page->private = (unsigned long)desc->shadow_ptes[0];
+		set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			page->private = (unsigned long)desc->more | 1;
+			set_page_private(page,(unsigned long)desc->more | 1);
 	mmu_free_rmap_desc(vcpu, desc);
 }
@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
-	} else if (!(page->private & 1)) {
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
-		if ((u64 *)page->private != spte) {
+		if ((u64 *)page_private(page) != spte) {
 			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
 			       spte, *spte);
 			BUG();
 		}
-		page->private = 0;
+		set_page_private(page,0);
 	} else {
 		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,11 +398,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 	BUG_ON(!slot);
 	page = gfn_to_page(slot, gfn);
-	while (page->private) {
-		if (!(page->private & 1))
-			spte = (u64 *)page->private;
+	while (page_private(page)) {
+		if (!(page_private(page) & 1))
+			spte = (u64 *)page_private(page);
 		else {
-			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 			spte = desc->shadow_ptes[0];
 		}
 		BUG_ON(!spte);
@@ -1218,7 +1218,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 		INIT_LIST_HEAD(&page_header->link);
 		if ((page = alloc_page(GFP_KERNEL)) == NULL)
 			goto error_1;
-		page->private = (unsigned long)page_header;
+		set_page_private(page, (unsigned long)page_header);
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
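
Read together, the first and last hunks are the two ends of one association: alloc_mmu_pages() stores the kvm_mmu_page header pointer in the shadow page's private field via set_page_private(), and page_header() later recovers it from a shadow page's host physical address by converting the hpa back to a struct page and reading page_private(). Every page->private use in this path therefore goes through the accessor pair, which is what the conversion in this commit guarantees.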