Commit f82a8cfe authored by Alex Williamson, committed by Marcelo Tosatti

KVM: struct kvm_memory_slot.user_alloc -> bool

There's no need for this to be an int; it holds a boolean. Move it to the
end of the struct for alignment, so the one-byte field does not force
padding between the larger members (see the layout sketch below).
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 0743247f
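
The alignment point is worth spelling out. Below is a minimal, self-contained C
sketch of the layout effect; the struct and field names are hypothetical (this
is not the kernel's struct kvm_memory_slot), and the quoted sizes assume a
typical LP64 target such as x86-64.

/*
 * Illustrative only: a 1-byte bool placed between larger members forces
 * the compiler to insert padding, while the same bool at the end of the
 * struct consumes at most tail padding.
 */
#include <stdbool.h>
#include <stdio.h>

struct bool_in_middle {
	bool user_alloc;	/* 1 byte + 7 bytes of padding */
	unsigned long addr;	/* 8 bytes, must be 8-byte aligned */
	int id;			/* 4 bytes + 4 bytes of tail padding */
};				/* sizeof == 24 on x86-64 */

struct bool_at_end {
	unsigned long addr;	/* 8 bytes */
	int id;			/* 4 bytes */
	bool user_alloc;	/* 1 byte + 3 bytes of tail padding */
};				/* sizeof == 16 on x86-64 */

int main(void)
{
	/* Prints "middle: 24, end: 16" on x86-64. */
	printf("middle: %zu, end: %zu\n",
	       sizeof(struct bool_in_middle), sizeof(struct bool_at_end));
	return 0;
}

Because the slot struct already ends on an int, appending the bool costs at
most the tail padding the compiler inserts anyway, whereas a one-byte member
mid-struct would waste space before the next naturally aligned field.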
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
@@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 					kvm_mem.guest_phys_addr;
 		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
 		r = kvm_vm_ioctl_set_memory_region(kvm,
-					&kvm_userspace_mem, 0);
+					&kvm_userspace_mem, false);
 		if (r)
 			goto out;
 		break;
@@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
 		struct kvm_userspace_memory_region *mem,
-		int user_alloc)
+		bool user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;
@@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 		struct kvm_userspace_memory_region *mem,
 		struct kvm_memory_slot old,
-		int user_alloc)
+		bool user_alloc)
 {
 	return;
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
@@ -412,7 +412,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }
@@ -420,7 +420,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot old,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	kvmppc_core_commit_memory_region(kvm, mem, old);
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
@@ -928,7 +928,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
@@ -958,7 +958,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
-				int user_alloc)
+				bool user_alloc)
 {
 	int rc;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
@@ -3667,7 +3667,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	kvm_userspace_mem.flags = 0;
 	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
 	if (r)
 		goto out;
@@ -3697,7 +3697,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	kvm_userspace_mem.guest_phys_addr =
 		kvm->arch.ept_identity_map_addr;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
 	if (r)
 		goto out;
@@ -4251,7 +4251,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 		.flags = 0,
 	};

-	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+	ret = kvm_set_memory_region(kvm, &tss_mem, false);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -6839,7 +6839,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
 				struct kvm_userspace_memory_region *mem,
-				int user_alloc)
+				bool user_alloc)
 {
 	int npages = memslot->npages;
 	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -6875,7 +6875,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
-				int user_alloc)
+				bool user_alloc)
 {
 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -270,8 +270,8 @@ struct kvm_memory_slot {
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
-	int user_alloc;
 	int id;
+	bool user_alloc;
 };

 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -451,10 +451,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc);
+			  bool user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
-			    int user_alloc);
+			    bool user_alloc);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
@@ -462,11 +462,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
 				struct kvm_userspace_memory_region *mem,
-				int user_alloc);
+				bool user_alloc);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
-				int user_alloc);
+				bool user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */
@@ -553,7 +553,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
-				   int user_alloc);
+				   bool user_alloc);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -709,7 +709,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
  */
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
-			    int user_alloc)
+			    bool user_alloc)
 {
 	int r;
 	gfn_t base_gfn;
@@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc)
+			  bool user_alloc)
 {
 	int r;
@@ -903,7 +903,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
@@ -2148,7 +2148,7 @@ static long kvm_vm_ioctl(struct file *filp,
 				sizeof kvm_userspace_mem))
 			goto out;

-		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
 		break;
 	}
 	case KVM_GET_DIRTY_LOG: {