Commit aa069a99 authored by Paul Mackerras

KVM: PPC: Book3S HV: Add a VM capability to enable nested virtualization

With this, userspace can enable a KVM-HV guest to run nested guests
under it.

The administrator can control whether any nested guests can be run:
setting the "nested" module parameter to false prevents any guest from
becoming a nested hypervisor (that is, any attempt to enable the nested
capability on a guest will fail).  Guests that are already nested
hypervisors will continue to be so.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Parent: 9d67121a
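For context, here is a minimal userspace sketch of how a VMM might probe and enable the new capability once this patch is applied. It is not part of the patch: the enable_nested_hv() helper and its error handling are illustrative assumptions, while KVM_CHECK_EXTENSION and KVM_ENABLE_CAP are the standard KVM ioctls that reach the code added below.

  /* Illustrative sketch only; enable_nested_hv() is a hypothetical helper. */
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #ifndef KVM_CAP_PPC_NESTED_HV
  #define KVM_CAP_PPC_NESTED_HV 160       /* value added by this patch */
  #endif

  static int enable_nested_hv(int vm_fd)
  {
          struct kvm_enable_cap cap;

          /* Ask whether nested-HV can be offered (POWER9 CPU, HV mode, and
           * the kvm-hv "nested" module parameter on).  On the VM fd this
           * lands in kvm_vm_ioctl_check_extension() as modified below. */
          if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0)
                  return -1;

          /* Turn the capability on for this VM; this reaches
           * kvmhv_enable_nested() via kvm_vm_ioctl_enable_cap(). */
          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_PPC_NESTED_HV;
          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }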
@@ -4532,6 +4532,20 @@ With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
 a #GP would be raised when the guest tries to access. Currently, this
 capability does not enable write permissions of this MSR for the guest.
 
+7.16 KVM_CAP_PPC_NESTED_HV
+
+Architectures: ppc
+Parameters: none
+Returns: 0 on success, -EINVAL when the implementation doesn't support
+         nested-HV virtualization.
+
+HV-KVM on POWER9 and later systems allows for "nested-HV"
+virtualization, which provides a way for a guest VM to run guests that
+can run using the CPU's supervisor mode (privileged non-hypervisor
+state). Enabling this capability on a VM depends on the CPU having
+the necessary functionality and on the facility being enabled with a
+kvm-hv module parameter.
+
 8. Other capabilities.
 ----------------------

...
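The new documentation text above notes that the facility must also be enabled with a kvm-hv module parameter. As a hedged illustration (the helper name and error handling are assumptions, and the sysfs path follows from standard module_param() behaviour rather than anything in this patch), an administrator could turn the host-wide switch off like this:

  /* Hypothetical admin-side sketch: clear the kvm_hv "nested" parameter. */
  #include <stdio.h>

  static int disable_nested_hv_hostwide(void)
  {
          /* module_param(nested, bool, S_IRUGO | S_IWUSR) typically exposes
           * /sys/module/kvm_hv/parameters/nested on the host. */
          FILE *f = fopen("/sys/module/kvm_hv/parameters/nested", "w");

          if (!f)
                  return -1;
          fputs("N\n", f);        /* "N"/"0" disables, "Y"/"1" enables */
          return fclose(f);
  }

As the commit message says, clearing the parameter only stops further guests from becoming nested hypervisors; guests that already enabled the capability keep it.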
@@ -325,6 +325,7 @@ struct kvmppc_ops {
         int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
                             unsigned long flags);
         void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+        int (*enable_nested)(struct kvm *kvm);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;

...
@@ -122,6 +122,16 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 #endif
 
+/* If set, guests are allowed to create and control nested guests */
+static bool nested = true;
+module_param(nested, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
+
+static inline bool nesting_enabled(struct kvm *kvm)
+{
+        return kvm->arch.nested_enable && kvm_is_radix(kvm);
+}
+
 /* If set, the threads on each CPU core have to be in the same MMU mode */
 static bool no_mixing_hpt_and_radix;
@@ -963,12 +973,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
         case H_SET_PARTITION_TABLE:
                 ret = H_FUNCTION;
-                if (vcpu->kvm->arch.nested_enable)
+                if (nesting_enabled(vcpu->kvm))
                         ret = kvmhv_set_partition_table(vcpu);
                 break;
         case H_ENTER_NESTED:
                 ret = H_FUNCTION;
-                if (!vcpu->kvm->arch.nested_enable)
+                if (!nesting_enabled(vcpu->kvm))
                         break;
                 ret = kvmhv_enter_nested_guest(vcpu);
                 if (ret == H_INTERRUPT) {
@@ -978,8 +988,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                 break;
         case H_TLB_INVALIDATE:
                 ret = H_FUNCTION;
-                if (!vcpu->kvm->arch.nested_enable)
-                        break;
-                ret = kvmhv_do_nested_tlbie(vcpu);
+                if (nesting_enabled(vcpu->kvm))
+                        ret = kvmhv_do_nested_tlbie(vcpu);
                 break;
@@ -4508,10 +4517,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
-        if (kvm->arch.nested_enable) {
-                kvm->arch.nested_enable = false;
+        if (nesting_enabled(kvm))
                 kvmhv_release_all_nested(kvm);
-        }
         kvmppc_free_radix(kvm);
         kvmppc_update_lpcr(kvm, LPCR_VPM1,
                            LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
@@ -4788,7 +4795,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
         /* Perform global invalidation and return lpid to the pool */
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-                if (kvm->arch.nested_enable)
+                if (nesting_enabled(kvm))
                         kvmhv_release_all_nested(kvm);
                 kvm->arch.process_table = 0;
                 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
@@ -5181,6 +5188,19 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
         return err;
 }
 
+static int kvmhv_enable_nested(struct kvm *kvm)
+{
+        if (!nested)
+                return -EPERM;
+        if (!cpu_has_feature(CPU_FTR_ARCH_300))
+                return -ENODEV;
+
+        /* kvm == NULL means the caller is testing if the capability exists */
+        if (kvm)
+                kvm->arch.nested_enable = true;
+        return 0;
+}
+
 static struct kvmppc_ops kvm_ops_hv = {
         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -5220,6 +5240,7 @@ static struct kvmppc_ops kvm_ops_hv = {
         .configure_mmu = kvmhv_configure_mmu,
         .get_rmmu_info = kvmhv_get_rmmu_info,
         .set_smt_mode = kvmhv_set_smt_mode,
+        .enable_nested = kvmhv_enable_nested,
 };
 
 static int kvm_init_subcore_bitmap(void)

...
@@ -597,6 +597,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
                        cpu_has_feature(CPU_FTR_HVMODE));
                 break;
+        case KVM_CAP_PPC_NESTED_HV:
+                r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
+                       !kvmppc_hv_ops->enable_nested(NULL));
+                break;
 #endif
         case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -2115,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
                 break;
         }
+        case KVM_CAP_PPC_NESTED_HV:
+                r = -EINVAL;
+                if (!is_kvmppc_hv_enabled(kvm) ||
+                    !kvm->arch.kvm_ops->enable_nested)
+                        break;
+                r = kvm->arch.kvm_ops->enable_nested(kvm);
+                break;
 #endif
         default:
                 r = -EINVAL;

...
@@ -953,6 +953,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_NESTED_STATE 157
 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158
 #define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
 
 #ifdef KVM_CAP_IRQ_ROUTING

...