Commit 7b6195a9 authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: set_memory_region: Refactor prepare_memory_region()

This patch drops the parameter old, a copy of the old memory slot, and
adds a new parameter named change that indicates the kind of change being requested.

This not only cleans up the code but also removes extra copying of the
memory slot structure.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 74d0727c
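For context, below is a minimal sketch (not part of this commit) of how an architecture hook can branch on the new change argument instead of inspecting a copied old slot. The stand-in type declarations and the enumerators other than KVM_MR_CREATE (which appears in the x86 hunk below) are assumptions based on the kvm_mr_change type introduced earlier in this series.

/* Sketch only: incomplete stand-in types so the example is self-contained.
 * In the kernel these come from <linux/kvm_host.h>. */
struct kvm;
struct kvm_memory_slot;
struct kvm_userspace_memory_region;

/* Assumed enumerators; only KVM_MR_CREATE is visible in this diff. */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

/* Hypothetical arch hook illustrating the new calling convention:
 * the requested change arrives directly, so no copy of the old slot
 * is needed just to tell a slot creation apart from a move/update. */
static int example_prepare_memory_region(struct kvm *kvm,
					 struct kvm_memory_slot *memslot,
					 struct kvm_userspace_memory_region *mem,
					 enum kvm_mr_change change)
{
	if (change == KVM_MR_CREATE) {
		/* allocate/prepare arch-specific state for a brand-new slot */
	}
	return 0;
}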
@@ -230,8 +230,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_memory_slot old,
-				   struct kvm_userspace_memory_region *mem)
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
...
@@ -1560,8 +1560,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot *memslot,
-		struct kvm_memory_slot old,
-		struct kvm_userspace_memory_region *mem)
+		struct kvm_userspace_memory_region *mem,
+		enum kvm_mr_change change)
 {
 	unsigned long i;
 	unsigned long pfn;
...
@@ -412,8 +412,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_memory_slot old,
-				   struct kvm_userspace_memory_region *mem)
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }
...
@@ -974,8 +974,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_memory_slot old,
-				   struct kvm_userspace_memory_region *mem)
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
...
@@ -6906,23 +6906,21 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
-				struct kvm_memory_slot old,
-				struct kvm_userspace_memory_region *mem)
+				struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change)
 {
-	int npages = memslot->npages;
-
 	/*
 	 * Only private memory slots need to be mapped here since
 	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
 	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
 		unsigned long userspace_addr;
 
 		/*
 		 * MAP_SHARED to prevent internal slot pages from being moved
 		 * by fork()/COW.
 		 */
-		userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
 					 PROT_READ | PROT_WRITE,
 					 MAP_SHARED | MAP_ANONYMOUS, 0);
...
@@ -479,8 +479,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
-				struct kvm_memory_slot old,
-				struct kvm_userspace_memory_region *mem);
+				struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old);
...
@@ -856,7 +856,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		slots = old_memslots;
 	}
 
-	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem);
+	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
 	if (r)
 		goto out_slots;
...