Commit 1d8007bd authored by Paolo Bonzini

KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region

The next patch will make x86_set_memory_region fill in the
userspace_addr.  Since the struct is no longer passed through
untouched, it makes sense to build it directly in
x86_set_memory_region; this also simplifies the callers.
Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
Cc: stable@vger.kernel.org
Fixes: 9da0e4d5
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent d2922422
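As a quick orientation before the diff: the helpers now take (id, gpa, size) and build the kvm_userspace_memory_region themselves, so callers no longer fill the struct field by field. A minimal before/after sketch of the caller-side change, using the alloc_apic_access_page() call taken from the diff below (kernel-style excerpt, not a standalone program):

    /* Before: each caller built the struct by hand. */
    struct kvm_userspace_memory_region kvm_userspace_mem;

    kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
    kvm_userspace_mem.flags = 0;
    kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
    kvm_userspace_mem.memory_size = PAGE_SIZE;
    r = __x86_set_memory_region(kvm, &kvm_userspace_mem);

    /* After: the helper builds the struct internally from (id, gpa, size). */
    r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);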
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-                            const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-                          const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
         struct page *page;
-        struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
         mutex_lock(&kvm->slots_lock);
         if (kvm->arch.apic_access_page_done)
                 goto out;
-        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-        kvm_userspace_mem.flags = 0;
-        kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-        kvm_userspace_mem.memory_size = PAGE_SIZE;
-        r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+        r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+                                    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
         if (r)
                 goto out;
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
         /* Called with kvm->slots_lock held. */
 
-        struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
         BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-        kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-        kvm_userspace_mem.flags = 0;
-        kvm_userspace_mem.guest_phys_addr =
-                kvm->arch.ept_identity_map_addr;
-        kvm_userspace_mem.memory_size = PAGE_SIZE;
-        r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+        r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+                                    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
         return r;
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
         int ret;
-        struct kvm_userspace_memory_region tss_mem = {
-                .slot = TSS_PRIVATE_MEMSLOT,
-                .guest_phys_addr = addr,
-                .memory_size = PAGE_SIZE * 3,
-                .flags = 0,
-        };
 
-        ret = x86_set_memory_region(kvm, &tss_mem);
+        ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+                                    PAGE_SIZE * 3);
         if (ret)
                 return ret;
         kvm->arch.tss_addr = addr;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7474,18 +7474,21 @@ void kvm_arch_sync_events(struct kvm *kvm)
         kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-                            const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
         int i, r;
 
         /* Called with kvm->slots_lock held. */
-        BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+        if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+                return -EINVAL;
 
         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-                struct kvm_userspace_memory_region m = *mem;
+                struct kvm_userspace_memory_region m;
 
-                m.slot |= i << 16;
+                m.slot = id | (i << 16);
+                m.flags = 0;
+                m.guest_phys_addr = gpa;
+                m.memory_size = size;
                 r = __kvm_set_memory_region(kvm, &m);
                 if (r < 0)
                         return r;
@@ -7495,13 +7498,12 @@ int __x86_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-                          const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
         int r;
 
         mutex_lock(&kvm->slots_lock);
-        r = __x86_set_memory_region(kvm, mem);
+        r = __x86_set_memory_region(kvm, id, gpa, size);
         mutex_unlock(&kvm->slots_lock);
 
         return r;
@@ -7516,16 +7518,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                  * unless the the memory map has changed due to process exit
                  * or fd copying.
                  */
-                struct kvm_userspace_memory_region mem;
-                memset(&mem, 0, sizeof(mem));
-                mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-                x86_set_memory_region(kvm, &mem);
-
-                mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-                x86_set_memory_region(kvm, &mem);
-
-                mem.slot = TSS_PRIVATE_MEMSLOT;
-                x86_set_memory_region(kvm, &mem);
+                x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+                x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+                x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
         }
         kvm_iommu_unmap_guest(kvm);
         kfree(kvm->arch.vpic);