Commit 98047888 authored by Christoffer Dall

arm/arm64: KVM: Support KVM_CAP_READONLY_MEM

When userspace loads code and data into read-only memory regions, KVM
needs to be able to handle this on arm and arm64.  Specifically, this is
used when running code directly from a read-only flash device; the
common scenario is a UEFI blob loaded with the -bios option in QEMU.

Note that the MMIO exit on writes to read-only memory is ABI and can
be used to emulate block-erase style flash devices.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Parent 64d83126
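For context, here is a minimal userspace sketch, not part of this commit, of how the new capability is consumed: probe KVM_CAP_READONLY_MEM, then register a memslot with the KVM_MEM_READONLY flag. The slot number, guest physical base, and flash size are illustrative values.

/*
 * Sketch: probe the capability advertised by this patch and register
 * a read-only memslot backed by anonymous memory.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

        /* The capability this patch advertises on arm/arm64. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_READONLY_MEM) <= 0) {
                fprintf(stderr, "KVM_CAP_READONLY_MEM unsupported\n");
                return 1;
        }

        /* Back the "flash" contents with ordinary anonymous memory. */
        size_t size = 64 << 20;         /* e.g. a 64 MiB UEFI image */
        void *flash = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* KVM_MEM_READONLY turns guest writes into KVM_EXIT_MMIO exits. */
        struct kvm_userspace_memory_region region = {
                .slot = 0,
                .flags = KVM_MEM_READONLY,
                .guest_phys_addr = 0,   /* flash base in guest IPA space */
                .memory_size = size,
                .userspace_addr = (unsigned long)flash,
        };
        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
                perror("KVM_SET_USER_MEMORY_REGION");
                return 1;
        }
        return 0;
}

Once registered this way, the guest reads and fetches from the region as normal memory; stores take the stage-2 fault path changed in the diff below.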
@@ -25,6 +25,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM

 #define KVM_REG_SIZE(id)						\
         (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
......
@@ -188,6 +188,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ARM_PSCI:
         case KVM_CAP_ARM_PSCI_0_2:
+        case KVM_CAP_READONLY_MEM:
                 r = 1;
                 break;
         case KVM_CAP_COALESCED_MMIO:
......
@@ -747,14 +747,13 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
 }

 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                          struct kvm_memory_slot *memslot,
+                          struct kvm_memory_slot *memslot, unsigned long hva,
                           unsigned long fault_status)
 {
         int ret;
         bool write_fault, writable, hugetlb = false, force_pte = false;
         unsigned long mmu_seq;
         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
-        unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
         struct kvm *kvm = vcpu->kvm;
         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
         struct vm_area_struct *vma;
@@ -863,7 +862,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
         unsigned long fault_status;
         phys_addr_t fault_ipa;
         struct kvm_memory_slot *memslot;
-        bool is_iabt;
+        unsigned long hva;
+        bool is_iabt, write_fault, writable;
         gfn_t gfn;
         int ret, idx;
@@ -884,7 +884,10 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
         idx = srcu_read_lock(&vcpu->kvm->srcu);

         gfn = fault_ipa >> PAGE_SHIFT;
-        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+        memslot = gfn_to_memslot(vcpu->kvm, gfn);
+        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
+        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+        if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                 if (is_iabt) {
                         /* Prefetch Abort on I/O address */
                         kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
@@ -892,13 +895,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                         goto out_unlock;
                 }

-                if (fault_status != FSC_FAULT) {
-                        kvm_err("Unsupported fault status on io memory: %#lx\n",
-                                fault_status);
-                        ret = -EFAULT;
-                        goto out_unlock;
-                }
-
                 /*
                  * The IPA is reported as [MAX:12], so we need to
                  * complement it with the bottom 12 bits from the
@@ -910,9 +906,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 goto out_unlock;
         }

-        memslot = gfn_to_memslot(vcpu->kvm, gfn);
-
-        ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
+        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
         if (ret == 0)
                 ret = 1;
 out_unlock:
......
@@ -37,6 +37,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM

 #define KVM_REG_SIZE(id)						\
         (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
......
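On the userspace side of the ABI described in the commit message, a guest store to such a slot returns from KVM_RUN with KVM_EXIT_MMIO and mmio.is_write set. Below is a hedged sketch of that dispatch; handle_flash_cmd() is a hypothetical flash-model hook, not a KVM API.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical flash-model hook; not part of KVM's API. */
void handle_flash_cmd(__u64 addr, const __u8 *data, __u32 len);

/* Run one vcpu, decoding writes to the read-only slot as flash commands. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                        perror("KVM_RUN");
                        return -1;
                }
                if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write) {
                        /*
                         * The write never reached the read-only backing
                         * memory; interpret it as an erase/program command.
                         */
                        handle_flash_cmd(run->mmio.phys_addr,
                                         run->mmio.data, run->mmio.len);
                        continue;
                }
                return 0;       /* other exit reasons handled elsewhere */
        }
}

This is how a block-erase style flash device can be modeled entirely in userspace on top of the exit behavior this commit makes ABI.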