Commit 7c8a83b7 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock

kvm_handle_hva, called by MMU notifiers, manipulates mmu data only with
the protection of mmu_lock.

Update kvm_mmu_change_mmu_pages callers to take mmu_lock, thus protecting
against kvm_handle_hva.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 310b5d30
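
The rule the patch enforces can be summarized in a small sketch (illustrative only, not part of the patch; the helper name resize_mmu_locked is hypothetical): any caller that resizes the shadow page pool must hold mmu_lock, because kvm_handle_hva walks and modifies the same MMU data under that lock.

	/* Minimal sketch of the caller-side pattern this patch introduces.
	 * kvm_handle_hva (run from MMU notifiers) manipulates MMU data only
	 * under mmu_lock, so pool resizing must take the same spinlock.
	 */
	static void resize_mmu_locked(struct kvm *kvm, unsigned int nr_pages)
	{
		spin_lock(&kvm->mmu_lock);	/* exclude concurrent kvm_handle_hva */
		kvm_mmu_change_mmu_pages(kvm, nr_pages);
		spin_unlock(&kvm->mmu_lock);
	}

The diff below applies exactly this pattern at each call site, and drops the locking from kvm_mmu_slot_remove_write_access itself so that callers can hold mmu_lock across it.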
@@ -2723,7 +2723,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
-	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
@@ -2738,7 +2737,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 			pt[i] &= ~PT_WRITABLE_MASK;
 	}
 	kvm_flush_remote_tlbs(kvm);
-	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
@@ -1625,10 +1625,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	down_write(&kvm->slots_lock);
+	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+	spin_unlock(&kvm->mmu_lock);
 	up_write(&kvm->slots_lock);
 	return 0;
 }
@@ -1804,7 +1806,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4548,12 +4552,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
+	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 
 	return 0;