提交 604b38ac 编写于 作者: A Andrea Arcangeli 提交者: Avi Kivity

KVM: Allow browsing memslots with mmu_lock

This allows reading memslots with only the mmu_lock held, for mmu
notifiers that run in atomic context with mmu_lock held.
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
上级 a1708ce8
...@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm, ...@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
*/ */
if (!user_alloc) { if (!user_alloc) {
if (npages && !old.rmap) { if (npages && !old.rmap) {
unsigned long userspace_addr;
down_write(&current->mm->mmap_sem); down_write(&current->mm->mmap_sem);
memslot->userspace_addr = do_mmap(NULL, 0, userspace_addr = do_mmap(NULL, 0,
npages * PAGE_SIZE, npages * PAGE_SIZE,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, MAP_SHARED | MAP_ANONYMOUS,
0); 0);
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
if (IS_ERR((void *)memslot->userspace_addr)) if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)memslot->userspace_addr); return PTR_ERR((void *)userspace_addr);
/* set userspace_addr atomically for kvm_hva_to_rmapp */
spin_lock(&kvm->mmu_lock);
memslot->userspace_addr = userspace_addr;
spin_unlock(&kvm->mmu_lock);
} else { } else {
if (!old.user_alloc && old.rmap) { if (!old.user_alloc && old.rmap) {
int ret; int ret;
......
...@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.rmap, 0, npages * sizeof(*new.rmap)); memset(new.rmap, 0, npages * sizeof(*new.rmap));
new.user_alloc = user_alloc; new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr; /*
* hva_to_rmmap() serialzies with the mmu_lock and to be
* safe it has to ignore memslots with !user_alloc &&
* !userspace_addr.
*/
if (user_alloc)
new.userspace_addr = mem->userspace_addr;
else
new.userspace_addr = 0;
} }
if (npages && !new.lpage_info) { if (npages && !new.lpage_info) {
int largepages = npages / KVM_PAGES_PER_HPAGE; int largepages = npages / KVM_PAGES_PER_HPAGE;
...@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
} }
#endif /* not defined CONFIG_S390 */ #endif /* not defined CONFIG_S390 */
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
if (!npages) if (!npages)
kvm_arch_flush_shadow(kvm); kvm_arch_flush_shadow(kvm);
spin_lock(&kvm->mmu_lock);
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
*memslot = new; *memslot = new;
spin_unlock(&kvm->mmu_lock);
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
if (r) { if (r) {
spin_lock(&kvm->mmu_lock);
*memslot = old; *memslot = old;
spin_unlock(&kvm->mmu_lock);
goto out_free; goto out_free;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册