Commit 46a26bf5, authored by Marcelo Tosatti

KVM: modify memslots layout in struct kvm

Have a pointer to an allocated region inside struct kvm.

[alex: fix ppc book 3s]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: 2044892d
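In effect, the patch moves the slot counter and the inline slot array out of struct kvm into a separately allocated struct kvm_memslots, so every `kvm->nmemslots` / `kvm->memslots[i]` access becomes `kvm->memslots->nmemslots` / `kvm->memslots->memslots[i]`. Below is a minimal userspace sketch of the before/after access pattern, with simplified types and illustrative constants (the real definitions live in the kernel headers; only the shape of the change is shown):

#include <stdio.h>
#include <stdlib.h>

#define KVM_MEMORY_SLOTS      32   /* illustrative; the real values are arch-defined */
#define KVM_PRIVATE_MEM_SLOTS 4

struct kvm_memory_slot {
        unsigned long base_gfn;
        unsigned long npages;
};

/* After the patch: slot count and array live in their own allocation. */
struct kvm_memslots {
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
        struct kvm_memslots *memslots;  /* was: int nmemslots; plus an inline array */
};

int main(void)
{
        struct kvm kvm;

        /* kvm_create_vm() now allocates the region up front (kzalloc in-kernel). */
        kvm.memslots = calloc(1, sizeof(struct kvm_memslots));
        if (!kvm.memslots)
                return 1;

        kvm.memslots->memslots[0] =
                (struct kvm_memory_slot){ .base_gfn = 0, .npages = 256 };
        kvm.memslots->nmemslots = 1;

        /* Old access pattern was kvm->memslots[i]; new pattern adds one dereference. */
        for (int i = 0; i < kvm.memslots->nmemslots; i++)
                printf("slot %d: base_gfn=%lu npages=%lu\n", i,
                       kvm.memslots->memslots[i].base_gfn,
                       kvm.memslots->memslots[i].npages);

        free(kvm.memslots);     /* mirrored by the kfree() added in kvm_free_physmem() */
        return 0;
}

One consequence of keeping the whole slot set behind a single pointer is that it can be replaced as a unit rather than updated field by field; the commit itself does not state a motivation, so treat that reading as inference from the data-structure change.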
@@ -1377,12 +1377,14 @@ static void free_kvm(struct kvm *kvm)
 
 static void kvm_release_vm_pages(struct kvm *kvm)
 {
+        struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
         int i, j;
         unsigned long base_gfn;
 
-        for (i = 0; i < kvm->nmemslots; i++) {
-                memslot = &kvm->memslots[i];
+        slots = kvm->memslots;
+        for (i = 0; i < slots->nmemslots; i++) {
+                memslot = &slots->memslots[i];
                 base_gfn = memslot->base_gfn;
 
                 for (j = 0; j < memslot->npages; j++) {
@@ -1802,7 +1804,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
         if (log->slot >= KVM_MEMORY_SLOTS)
                 goto out;
 
-        memslot = &kvm->memslots[log->slot];
+        memslot = &kvm->memslots->memslots[log->slot];
         r = -ENOENT;
         if (!memslot->dirty_bitmap)
                 goto out;
@@ -1840,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         /* If nothing is dirty, don't bother messing with page tables. */
         if (is_dirty) {
                 kvm_flush_remote_tlbs(kvm);
-                memslot = &kvm->memslots[log->slot];
+                memslot = &kvm->memslots->memslots[log->slot];
                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                 memset(memslot->dirty_bitmap, 0, n);
         }
...
@@ -865,7 +865,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         /* If nothing is dirty, don't bother messing with page tables. */
         if (is_dirty) {
-                memslot = &kvm->memslots[log->slot];
+                memslot = &kvm->memslots->memslots[log->slot];
                 ga = memslot->base_gfn << PAGE_SHIFT;
                 ga_end = ga + (memslot->npages << PAGE_SHIFT);
...
@@ -807,13 +807,14 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 {
         int i, j;
         int retval = 0;
+        struct kvm_memslots *slots = kvm->memslots;
 
         /*
          * If mmap_sem isn't taken, we can look the memslots with only
          * the mmu_lock by skipping over the slots with userspace_addr == 0.
          */
-        for (i = 0; i < kvm->nmemslots; i++) {
-                struct kvm_memory_slot *memslot = &kvm->memslots[i];
+        for (i = 0; i < slots->nmemslots; i++) {
+                struct kvm_memory_slot *memslot = &slots->memslots[i];
                 unsigned long start = memslot->userspace_addr;
                 unsigned long end;
@@ -3021,8 +3022,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
         unsigned int nr_mmu_pages;
         unsigned int nr_pages = 0;
 
-        for (i = 0; i < kvm->nmemslots; i++)
-                nr_pages += kvm->memslots[i].npages;
+        for (i = 0; i < kvm->memslots->nmemslots; i++)
+                nr_pages += kvm->memslots->memslots[i].npages;
 
         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
         nr_mmu_pages = max(nr_mmu_pages,
@@ -3295,7 +3296,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
         int i, j, k;
 
         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-                struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+                struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
                 struct kvm_rmap_desc *d;
 
                 for (j = 0; j < m->npages; ++j) {
...
@@ -1503,8 +1503,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
         if (!kvm->arch.tss_addr) {
-                gfn_t base_gfn = kvm->memslots[0].base_gfn +
-                                 kvm->memslots[0].npages - 3;
+                gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn +
+                                 kvm->memslots->memslots[0].npages - 3;
                 return base_gfn << PAGE_SHIFT;
         }
         return kvm->arch.tss_addr;
...
@@ -2427,7 +2427,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                 spin_lock(&kvm->mmu_lock);
                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
                 spin_unlock(&kvm->mmu_lock);
-                memslot = &kvm->memslots[log->slot];
+                memslot = &kvm->memslots->memslots[log->slot];
                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                 memset(memslot->dirty_bitmap, 0, n);
         }
@@ -5223,7 +5223,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                                 int user_alloc)
 {
         int npages = mem->memory_size >> PAGE_SHIFT;
-        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+        struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
 
         /*To keep backward compatibility with older userspace,
          *x86 needs to hanlde !user_alloc case.
...
@@ -150,14 +150,18 @@ struct kvm_irq_routing_table {};
 #endif
 
+struct kvm_memslots {
+        int nmemslots;
+        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+                                        KVM_PRIVATE_MEM_SLOTS];
+};
+
 struct kvm {
         spinlock_t mmu_lock;
         spinlock_t requests_lock;
         struct rw_semaphore slots_lock;
         struct mm_struct *mm; /* userspace tied to this vm */
-        int nmemslots;
-        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
-                                        KVM_PRIVATE_MEM_SLOTS];
+        struct kvm_memslots *memslots;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
         u32 bsp_vcpu_id;
         struct kvm_vcpu *bsp_vcpu;
@@ -482,7 +486,7 @@ static inline void kvm_guest_exit(void)
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-        return slot - kvm->memslots;
+        return slot - kvm->memslots->memslots;
 }
 
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
...
@@ -76,10 +76,13 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
         int i, r = 0;
+        struct kvm_memslots *slots;
 
-        for (i = 0; i < kvm->nmemslots; i++) {
-                r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
-                                        kvm->memslots[i].npages);
+        slots = kvm->memslots;
+
+        for (i = 0; i < slots->nmemslots; i++) {
+                r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
+                                        slots->memslots[i].npages);
                 if (r)
                         break;
         }
@@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
         int i;
+        struct kvm_memslots *slots;
+
+        slots = kvm->memslots;
 
-        for (i = 0; i < kvm->nmemslots; i++) {
-                kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
-                                kvm->memslots[i].npages);
+        for (i = 0; i < slots->nmemslots; i++) {
+                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+                                    slots->memslots[i].npages);
         }
 
         return 0;
...
@@ -375,12 +375,16 @@ static struct kvm *kvm_create_vm(void)
         INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
+        r = -ENOMEM;
+        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+        if (!kvm->memslots)
+                goto out_err;
+
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-        if (!page) {
-                r = -ENOMEM;
+        if (!page)
                 goto out_err;
-        }
         kvm->coalesced_mmio_ring =
                         (struct kvm_coalesced_mmio_ring *)page_address(page);
 #endif
@@ -416,6 +420,7 @@ static struct kvm *kvm_create_vm(void)
 out_err:
         hardware_disable_all();
 out_err_nodisable:
+        kfree(kvm->memslots);
         kfree(kvm);
         return ERR_PTR(r);
 }
@@ -450,9 +455,12 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 void kvm_free_physmem(struct kvm *kvm)
 {
         int i;
+        struct kvm_memslots *slots = kvm->memslots;
+
+        for (i = 0; i < slots->nmemslots; ++i)
+                kvm_free_physmem_slot(&slots->memslots[i], NULL);
 
-        for (i = 0; i < kvm->nmemslots; ++i)
-                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+        kfree(kvm->memslots);
 }
 
 static void kvm_destroy_vm(struct kvm *kvm)
@@ -533,7 +541,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                 goto out;
 
-        memslot = &kvm->memslots[mem->slot];
+        memslot = &kvm->memslots->memslots[mem->slot];
         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
         npages = mem->memory_size >> PAGE_SHIFT;
@@ -554,7 +562,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         /* Check for overlaps */
         r = -EEXIST;
         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-                struct kvm_memory_slot *s = &kvm->memslots[i];
+                struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
 
                 if (s == memslot || !s->npages)
                         continue;
@@ -656,8 +664,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
         kvm_arch_flush_shadow(kvm);
 
         spin_lock(&kvm->mmu_lock);
-        if (mem->slot >= kvm->nmemslots)
-                kvm->nmemslots = mem->slot + 1;
+        if (mem->slot >= kvm->memslots->nmemslots)
+                kvm->memslots->nmemslots = mem->slot + 1;
 
         *memslot = new;
         spin_unlock(&kvm->mmu_lock);
@@ -727,7 +735,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
         if (log->slot >= KVM_MEMORY_SLOTS)
                 goto out;
 
-        memslot = &kvm->memslots[log->slot];
+        memslot = &kvm->memslots->memslots[log->slot];
         r = -ENOENT;
         if (!memslot->dirty_bitmap)
                 goto out;
@@ -781,9 +789,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
         int i;
+        struct kvm_memslots *slots = kvm->memslots;
 
-        for (i = 0; i < kvm->nmemslots; ++i) {
-                struct kvm_memory_slot *memslot = &kvm->memslots[i];
+        for (i = 0; i < slots->nmemslots; ++i) {
+                struct kvm_memory_slot *memslot = &slots->memslots[i];
 
                 if (gfn >= memslot->base_gfn
                     && gfn < memslot->base_gfn + memslot->npages)
@@ -802,10 +811,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
         int i;
+        struct kvm_memslots *slots = kvm->memslots;
 
         gfn = unalias_gfn(kvm, gfn);
         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-                struct kvm_memory_slot *memslot = &kvm->memslots[i];
+                struct kvm_memory_slot *memslot = &slots->memslots[i];
 
                 if (gfn >= memslot->base_gfn
                     && gfn < memslot->base_gfn + memslot->npages)
...