Commit b455a717 authored by Keqian Zhu, committed by Zheng Zengkai

KVM: arm64: Remove the creation time's mapping of MMIO regions

mainline inclusion
from mainline-v5.14-rc1
commit fd6f17ba
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5R1MW
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fd6f17bade21

---------------------------------------------------------------------

MMIO regions may be unmapped for many reasons and can be remapped by the
stage2 fault path. Mapping MMIO regions at creation time is therefore only
a minor optimization, and it makes these two mapping paths hard to keep in
sync.

Remove the mapping code while keeping the useful sanity check.
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Heng Zhang <zhangheng191@h-partners.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Link: https://lore.kernel.org/r/20210507110322.23348-2-zhukeqian1@huawei.com
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
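
What this patch keeps is a pure sanity-check walk over the VMAs backing the
memslot: PFN-mapped (MMIO) regions may not enable dirty page logging, and
nothing is mapped eagerly any more. Below is a minimal user-space model of
that surviving loop; the vma_t type, the mock find_vma() helper, and the
flag values are simplified stand-ins chosen for illustration, not the
kernel's definitions:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures and flags involved. */
typedef struct { uint64_t vm_start, vm_end; unsigned int vm_flags; } vma_t;
#define VM_PFNMAP               0x1u    /* illustrative value */
#define KVM_MEM_LOG_DIRTY_PAGES 0x1u    /* illustrative value */

/* Mock find_vma(): first VMA (sorted by address) ending above addr. */
static vma_t *find_vma(vma_t *vmas, int n, uint64_t addr)
{
        for (int i = 0; i < n; i++)
                if (addr < vmas[i].vm_end)
                        return &vmas[i];
        return NULL;
}

/*
 * Shape of the post-patch kvm_arch_prepare_memory_region() walk:
 * visit every VMA the region covers (skipping holes between them) and
 * only reject dirty logging on PFN-mapped regions; no eager mapping.
 */
static int check_memslot(vma_t *vmas, int n, uint64_t hva, uint64_t size,
                         unsigned int slot_flags)
{
        uint64_t reg_end = hva + size;
        int ret = 0;

        do {
                vma_t *vma = find_vma(vmas, n, hva);

                if (!vma || vma->vm_start >= reg_end)
                        break;

                if (vma->vm_flags & VM_PFNMAP) {
                        /* IO region dirty page logging not allowed */
                        if (slot_flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
                                break;
                        }
                }
                /* Step to the end of this VMA, clamped to the region. */
                hva = vma->vm_end < reg_end ? vma->vm_end : reg_end;
        } while (hva < reg_end);

        return ret;
}

int main(void)
{
        /* Two VMAs with a hole in between; the second is PFN-mapped. */
        vma_t vmas[] = { { 0x1000, 0x3000, 0 },
                         { 0x4000, 0x6000, VM_PFNMAP } };

        printf("%d\n", check_memslot(vmas, 2, 0x1000, 0x5000, 0));  /* 0 */
        printf("%d\n", check_memslot(vmas, 2, 0x1000, 0x5000,
                                     KVM_MEM_LOG_DIRTY_PAGES));     /* -22 */
        return 0;
}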
Parent 04f91e64
@@ -1302,7 +1302,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 {
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
-       bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;
 
        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1319,8 +1318,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
-        * between them, so iterate over all of them to find out if we can map
-        * any of them right now.
+        * between them, so iterate over all of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
@@ -1331,50 +1329,21 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
-               hva_t vm_start, vm_end;
 
                if (!vma || vma->vm_start >= reg_end)
                        break;
 
-               /*
-                * Take the intersection of this VMA with the memory region
-                */
-               vm_start = max(hva, vma->vm_start);
-               vm_end = min(reg_end, vma->vm_end);
-
                if (vma->vm_flags & VM_PFNMAP) {
-                       gpa_t gpa = mem->guest_phys_addr +
-                                   (vm_start - mem->userspace_addr);
-                       phys_addr_t pa;
-
-                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-                       pa += vm_start - vma->vm_start;
-
                        /* IO region dirty page logging not allowed */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
-                               goto out;
-                       }
-
-                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
-                                                   vm_end - vm_start,
-                                                   writable);
-                       if (ret)
                                break;
+                       }
                }
-               hva = vm_end;
+               hva = min(reg_end, vma->vm_end);
        } while (hva < reg_end);
 
-       if (change == KVM_MR_FLAGS_ONLY)
-               goto out;
-
-       spin_lock(&kvm->mmu_lock);
-       if (ret)
-               unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
-       else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
-               stage2_flush_memslot(kvm, memslot);
-       spin_unlock(&kvm->mmu_lock);
-out:
        mmap_read_unlock(current->mm);
        return ret;
 }
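For reference, the deleted branch eagerly translated each PFN-mapped
intersection into a stage2 I/O mapping via kvm_phys_addr_ioremap(),
deriving the guest and host addresses from the memslot and the VMA. A
standalone sketch of that address arithmetic, with all values invented
purely for illustration:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* Invented example values for one PFN-mapped intersection. */
        uint64_t userspace_addr  = 0x7f0000000000; /* mem->userspace_addr  */
        uint64_t guest_phys_addr = 0x40000000;     /* mem->guest_phys_addr */
        uint64_t vma_start       = 0x7f0000000000; /* vma->vm_start        */
        uint64_t vm_pgoff        = 0x90000;        /* vma->vm_pgoff        */
        uint64_t vm_start        = 0x7f0000001000; /* max(hva, vm_start)   */

        /* Guest physical address of the intersection's start. */
        uint64_t gpa = guest_phys_addr + (vm_start - userspace_addr);

        /* Host physical address backing it, from the VMA's page offset. */
        uint64_t pa = (vm_pgoff << PAGE_SHIFT) + (vm_start - vma_start);

        /* Prints gpa = 0x40001000, pa = 0x90001000 */
        printf("gpa = 0x%" PRIx64 ", pa = 0x%" PRIx64 "\n", gpa, pa);
        return 0;
}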