提交 22945688 编写于 作者: B Bharata B Rao 提交者: Paul Mackerras

KVM: PPC: Book3S HV: Support reset of secure guest

Add support for reset of secure guest via a new ioctl KVM_PPC_SVM_OFF.
This ioctl will be issued by QEMU during reset and includes the
following steps:

- Release all device pages of the secure guest.
- Ask UV to terminate the guest via UV_SVM_TERMINATE ucall
- Unpin the VPA pages so that they can be migrated back to secure
  side when guest becomes secure again. This is required because
  pinned pages can't be migrated.
- Reinit the partition scoped page tables

After these steps, guest is ready to issue UV_ESM call once again
to switch to secure mode.
Signed-off-by: NBharata B Rao <bharata@linux.ibm.com>
Signed-off-by: NSukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
	[Implementation of uv_svm_terminate() and its call from
	guest shutdown path]
Signed-off-by: NRam Pai <linuxram@us.ibm.com>
	[Unpinning of VPA pages]
Signed-off-by: NPaul Mackerras <paulus@ozlabs.org>
上级 c3262257
...@@ -4149,6 +4149,24 @@ Valid values for 'action': ...@@ -4149,6 +4149,24 @@ Valid values for 'action':
#define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1 #define KVM_PMU_EVENT_DENY 1
4.121 KVM_PPC_SVM_OFF
Capability: basic
Architectures: powerpc
Type: vm ioctl
Parameters: none
Returns: 0 on successful completion,
Errors:
EINVAL: if ultravisor failed to terminate the secure guest
ENOMEM: if hypervisor failed to allocate new radix page tables for guest
This ioctl is used to turn off the secure mode of the guest or transition
the guest from secure mode to normal mode. This is invoked when the guest
is reset. This has no effect if called for a normal guest.
This ioctl issues an ultravisor call to terminate the secure guest,
unpins the VPA pages and releases all the device pages that the
hypervisor uses to track the secure pages.
5. The kvm_run structure 5. The kvm_run structure
------------------------ ------------------------
......
...@@ -322,6 +322,7 @@ struct kvmppc_ops { ...@@ -322,6 +322,7 @@ struct kvmppc_ops {
int size); int size);
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
int size); int size);
int (*svm_off)(struct kvm *kvm);
}; };
extern struct kvmppc_ops *kvmppc_hv_ops; extern struct kvmppc_ops *kvmppc_hv_ops;
......
...@@ -34,5 +34,6 @@ ...@@ -34,5 +34,6 @@
#define UV_UNSHARE_PAGE 0xF134 #define UV_UNSHARE_PAGE 0xF134
#define UV_UNSHARE_ALL_PAGES 0xF140 #define UV_UNSHARE_ALL_PAGES 0xF140
#define UV_PAGE_INVAL 0xF138 #define UV_PAGE_INVAL 0xF138
#define UV_SVM_TERMINATE 0xF13C
#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
...@@ -77,4 +77,9 @@ static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) ...@@ -77,4 +77,9 @@ static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
} }
/*
 * Ask the ultravisor to terminate the secure guest owned by the
 * partition identified by @lpid, via the UV_SVM_TERMINATE ucall.
 * Returns the ucall status (U_SUCCESS on success).
 */
static inline int uv_svm_terminate(u64 lpid)
{
	return ucall_norets(UV_SVM_TERMINATE, lpid);
}
#endif /* _ASM_POWERPC_ULTRAVISOR_H */ #endif /* _ASM_POWERPC_ULTRAVISOR_H */
...@@ -4983,6 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) ...@@ -4983,6 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
if (nesting_enabled(kvm)) if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm); kvmhv_release_all_nested(kvm);
kvm->arch.process_table = 0; kvm->arch.process_table = 0;
uv_svm_terminate(kvm->arch.lpid);
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
} }
...@@ -5425,6 +5426,94 @@ static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, ...@@ -5425,6 +5426,94 @@ static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
return rc; return rc;
} }
/*
 * Unpin a VPA page and reset its bookkeeping state so that the guest
 * can register it again later (e.g. after transitioning back to secure
 * mode, where the page must first be migratable and hence unpinned).
 */
static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	unpin_vpa(kvm, vpa);

	/* Forget everything we knew about this VPA registration. */
	vpa->update_pending = 0;
	vpa->dirty = false;
	vpa->pinned_addr = NULL;
	vpa->gpa = 0;
}
/*
 * IOCTL handler to turn off secure mode of guest (KVM_PPC_SVM_OFF)
 *
 * - Release all device pages
 * - Issue ucall to terminate the guest on the UV side
 * - Unpin the VPA pages.
 * - Reinit the partition scoped page tables
 *
 * Returns 0 on success, -EBUSY if vcpus are running, -EINVAL if the
 * ultravisor fails to terminate the guest.
 */
static int kvmhv_svm_off(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int mmu_was_ready;
	int srcu_idx;
	int ret = 0;
	int i;

	/* No effect on a normal guest that never started the SVM transition */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return ret;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_was_ready = kvm->arch.mmu_ready;
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			/* A vcpu raced in after we cleared mmu_ready; back out */
			kvm->arch.mmu_ready = 1;
			ret = -EBUSY;
			goto out;
		}
	}

	/*
	 * Under SRCU (required for memslot traversal), release the
	 * device pages of every memslot and tell the ultravisor to
	 * drop its registration of each slot.
	 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memory_slot *memslot;
		struct kvm_memslots *slots = __kvm_memslots(kvm, i);

		if (!slots)
			continue;

		kvm_for_each_memslot(memslot, slots) {
			kvmppc_uvmem_drop_pages(memslot, kvm);
			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	/* Ask the ultravisor to terminate the secure guest */
	ret = uv_svm_terminate(kvm->arch.lpid);
	if (ret != U_SUCCESS) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * When secure guest is reset, all the guest pages are sent
	 * to UV via UV_PAGE_IN before the non-boot vcpus get a
	 * chance to run and unpin their VPA pages. Unpinning of all
	 * VPA pages is done here explicitly so that VPA pages
	 * can be migrated to the secure side.
	 *
	 * This is required for the secure SMP guest to reboot
	 * correctly.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		unpin_vpa_reset(kvm, &vcpu->arch.dtl);
		unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
		unpin_vpa_reset(kvm, &vcpu->arch.vpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}

	/* Rebuild the partition-scoped page tables for the normal guest */
	kvmppc_setup_partition_table(kvm);
	kvm->arch.secure_guest = 0;
	kvm->arch.mmu_ready = mmu_was_ready;
out:
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}
static struct kvmppc_ops kvm_ops_hv = { static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
...@@ -5468,6 +5557,7 @@ static struct kvmppc_ops kvm_ops_hv = { ...@@ -5468,6 +5557,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.enable_nested = kvmhv_enable_nested, .enable_nested = kvmhv_enable_nested,
.load_from_eaddr = kvmhv_load_from_eaddr, .load_from_eaddr = kvmhv_load_from_eaddr,
.store_to_eaddr = kvmhv_store_to_eaddr, .store_to_eaddr = kvmhv_store_to_eaddr,
.svm_off = kvmhv_svm_off,
}; };
static int kvm_init_subcore_bitmap(void) static int kvm_init_subcore_bitmap(void)
......
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
#include <asm/hvcall.h> #include <asm/hvcall.h>
#include <asm/plpar_wrappers.h> #include <asm/plpar_wrappers.h>
#endif #endif
#include <asm/ultravisor.h>
#include <asm/kvm_host.h>
#include "timing.h" #include "timing.h"
#include "irq.h" #include "irq.h"
...@@ -2413,6 +2415,16 @@ long kvm_arch_vm_ioctl(struct file *filp, ...@@ -2413,6 +2415,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT; r = -EFAULT;
break; break;
} }
case KVM_PPC_SVM_OFF: {
struct kvm *kvm = filp->private_data;
r = 0;
if (!kvm->arch.kvm_ops->svm_off)
goto out;
r = kvm->arch.kvm_ops->svm_off(kvm);
break;
}
default: { default: {
struct kvm *kvm = filp->private_data; struct kvm *kvm = filp->private_data;
r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
......
...@@ -1348,6 +1348,7 @@ struct kvm_s390_ucas_mapping { ...@@ -1348,6 +1348,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
/* Available with KVM_CAP_PMU_EVENT_FILTER */ /* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) #define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
/* ioctl for vm fd */ /* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册