Commit 8f060f53 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-5.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

KVM: s390: Features for 5.1

- Clarify KVM related kernel messages
- Interrupt cleanup
- Introduction of the Guest Information Block (GIB)
- Preparation for processor subfunctions in cpu model
@@ -331,5 +331,6 @@ extern void css_schedule_reprobe(void);
 /* Function from drivers/s390/cio/chsc.c */
 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
 int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_sgib(u32 origin);
 #endif
@@ -62,6 +62,7 @@ enum interruption_class {
 	IRQIO_MSI,
 	IRQIO_VIR,
 	IRQIO_VAI,
+	IRQIO_GAL,
 	NMI_NMI,
 	CPU_RST,
 	NR_ARCH_IRQS
@@ -21,6 +21,7 @@
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC	/* I/O subchannel in qdio mode */
 #define PCI_ISC 2			/* PCI I/O subchannels */
+#define GAL_ISC 5			/* GIB alert */
 #define AP_ISC 6			/* adjunct processor (crypto) devices */
 /* Functions for registration of I/O interruption subclasses */
@@ -591,7 +591,6 @@ struct kvm_s390_float_interrupt {
 	struct kvm_s390_mchk_info mchk;
 	struct kvm_s390_ext_info srv_signal;
 	int next_rr_cpu;
-	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
@@ -712,6 +711,7 @@ struct s390_io_adapter {
 struct kvm_s390_cpu_model {
 	/* facility mask supported by kvm & hosting machine */
 	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+	struct kvm_s390_vm_cpu_subfunc subfuncs;
 	/* facility list requested by guest (in dma page) */
 	__u64 *fac_list;
 	u64 cpuid;
@@ -782,9 +782,21 @@ struct kvm_s390_gisa {
 			u8 reserved03[11];
 			u32 airq_count;
 		} g1;
+		struct {
+			u64 word[4];
+		} u64;
 	};
 };

+struct kvm_s390_gib {
+	u32 alert_list_origin;
+	u32 reserved01;
+	u8:5;
+	u8  nisc:3;
+	u8  reserved03[3];
+	u32 reserved04[5];
+};
+
 /*
  * sie_page2 has to be allocated as DMA because fac_list, crycb and
  * gisa need 31bit addresses in the sie control block.
@@ -793,7 +805,8 @@ struct sie_page2 {
 	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
 	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
 	struct kvm_s390_gisa gisa;			/* 0x0900 */
-	u8 reserved920[0x1000 - 0x920];			/* 0x0920 */
+	struct kvm *kvm;				/* 0x0920 */
+	u8 reserved928[0x1000 - 0x928];			/* 0x0928 */
 };

 struct kvm_s390_vsie {
@@ -804,6 +817,20 @@ struct kvm_s390_vsie {
 	struct page *pages[KVM_MAX_VCPUS];
 };

+struct kvm_s390_gisa_iam {
+	u8 mask;
+	spinlock_t ref_lock;
+	u32 ref_count[MAX_ISC + 1];
+};
+
+struct kvm_s390_gisa_interrupt {
+	struct kvm_s390_gisa *origin;
+	struct kvm_s390_gisa_iam alert;
+	struct hrtimer timer;
+	u64 expires;
+	DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
+};
+
 struct kvm_arch{
 	void *sca;
 	int use_esca;
@@ -837,7 +864,8 @@ struct kvm_arch{
 	atomic64_t cmma_dirty_pages;
 	/* subset of available cpu features enabled by user space */
 	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
-	struct kvm_s390_gisa *gisa;
+	DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+	struct kvm_s390_gisa_interrupt gisa_int;
 };

 #define KVM_HVA_ERR_BAD		(-1UL)
@@ -871,6 +899,9 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;

+extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
+extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_check_processor_compat(void *rtn) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
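Reviewer note: the two extern declarations above are the driver-facing half of the GIB machinery. Below is a minimal sketch of how a device driver might use them, assuming the return conventions this series establishes (negative errno on failure, otherwise the GIB's non-specific ISC); the function names are illustrative, not part of the merge.

/* Illustrative only: enable GIB alerting for one guest ISC. */
static int example_enable_gib_alerts(struct kvm *kvm, u32 gisc)
{
	int nisc;

	/* Takes a reference on gisc in the IAM ref counts and arms the
	 * corresponding alert mask bit (assumption based on the
	 * kvm_s390_gisa_iam bookkeeping shown above). */
	nisc = kvm_s390_gisc_register(kvm, gisc);
	if (nisc < 0)
		return nisc;

	/* On success, alerts for this guest surface as host adapter
	 * interrupts on the GIB's ISC (GAL_ISC in this merge). */
	return nisc;
}

static void example_disable_gib_alerts(struct kvm *kvm, u32 gisc)
{
	/* Drops the reference taken above; presumably the last reference
	 * clears the alert mask bit again. */
	kvm_s390_gisc_unregister(kvm, gisc);
}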
@@ -88,6 +88,7 @@ static const struct irq_class irqclass_sub_desc[] = {
 	{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
 	{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
 	{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+	{.irq = IRQIO_GAL, .name = "GAL", .desc = "[I/O] GIB Alert"},
 	{.irq = NMI_NMI,   .name = "NMI", .desc = "[NMI] Machine Check"},
 	{.irq = CPU_RST,   .name = "RST", .desc = "[CPU] CPU Restart"},
 };
This diff is collapsed.
@@ -432,11 +432,18 @@ int kvm_arch_init(void *opaque)
 	/* Register floating interrupt controller interface. */
 	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 	if (rc) {
-		pr_err("Failed to register FLIC rc=%d\n", rc);
+		pr_err("A FLIC registration call failed with rc=%d\n", rc);
 		goto out_debug_unreg;
 	}
+
+	rc = kvm_s390_gib_init(GAL_ISC);
+	if (rc)
+		goto out_gib_destroy;
+
 	return 0;

+out_gib_destroy:
+	kvm_s390_gib_destroy();
 out_debug_unreg:
 	debug_unregister(kvm_s390_dbf);
 	return rc;
@@ -444,6 +451,7 @@ int kvm_arch_init(void *opaque)

 void kvm_arch_exit(void)
 {
+	kvm_s390_gib_destroy();
 	debug_unregister(kvm_s390_dbf);
 }
@@ -1258,11 +1266,65 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
 					  struct kvm_device_attr *attr)
 {
-	/*
-	 * Once supported by kernel + hw, we have to store the subfunctions
-	 * in kvm->arch and remember that user space configured them.
-	 */
-	return -ENXIO;
+	mutex_lock(&kvm->lock);
+	if (kvm->created_vcpus) {
+		mutex_unlock(&kvm->lock);
+		return -EBUSY;
+	}
+
+	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
+			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
+		mutex_unlock(&kvm->lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&kvm->lock);
+
+	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+	return 0;
 }
 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -1381,12 +1443,56 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
 					  struct kvm_device_attr *attr)
 {
-	/*
-	 * Once we can actually configure subfunctions (kernel + hw support),
-	 * we have to check if they were already set by user space, if so copy
-	 * them from kvm->arch.
-	 */
-	return -ENXIO;
+	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
+			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
+		return -EFAULT;
+
+	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+	return 0;
 }
 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
@@ -1395,8 +1501,55 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
 			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
 		return -EFAULT;
+
+	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
+	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
+	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
+	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
+	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
+	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
+	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
+	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+
 	return 0;
 }
 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret = -ENXIO;
@@ -1514,10 +1667,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
 	case KVM_S390_VM_CPU_MACHINE_FEAT:
 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
 		ret = 0;
 		break;
-	/* configuring subfunctions is not supported yet */
-	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
 	default:
 		ret = -ENXIO;
 		break;
@@ -2209,6 +2361,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.sie_page2)
 		goto out_err;

+	kvm->arch.sie_page2->kvm = kvm;
 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

 	for (i = 0; i < kvm_s390_fac_size(); i++) {
@@ -2218,6 +2371,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
 					      kvm_s390_fac_base[i];
 	}
+	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

 	/* we are always in czam mode - even on pre z14 machines */
 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
@@ -2812,7 +2966,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->icpua = id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
 	seqcount_init(&vcpu->arch.cputm_seqcount);
@@ -3458,6 +3612,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 		kvm_s390_patch_guest_per_regs(vcpu);
 	}

+	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
@@ -4293,12 +4449,12 @@ static int __init kvm_s390_init(void)
 	int i;

 	if (!sclp.has_sief2) {
-		pr_info("SIE not available\n");
+		pr_info("SIE is not available\n");
 		return -ENODEV;
 	}

 	if (nested && hpage) {
-		pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
 		return -EINVAL;
 	}
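Reviewer note: with the set/get handlers above in place, user space can mirror the machine's subfunction list into the guest CPU model, but only before the first vCPU exists (the kernel returns -EBUSY afterwards). A hedged sketch of the ioctl sequence; the attribute constants come from the s390 uapi headers, while the function and fd names are illustrative:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: copy the host machine's subfunctions into the guest CPU
 * model. Must run before KVM_CREATE_VCPU, per the -EBUSY check above. */
static int configure_subfuncs(int vm_fd)
{
	struct kvm_s390_vm_cpu_subfunc subfunc;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
		.addr  = (__u64)(unsigned long)&subfunc,
	};

	/* Query what the host machine offers ... */
	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;

	/* ... then apply it (possibly with bits masked out) as the
	 * guest's processor-level subfunction set. */
	attr.attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}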
@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)

 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
 }

 static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -381,6 +381,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
 void kvm_s390_gisa_init(struct kvm *kvm);
 void kvm_s390_gisa_clear(struct kvm *kvm);
 void kvm_s390_gisa_destroy(struct kvm *kvm);
+int kvm_s390_gib_init(u8 nisc);
+void kvm_s390_gib_destroy(void);

 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
@@ -1382,3 +1382,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
 	return chsc_error_from_response(brinfo_area->response.code);
 }
 EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
+
+int chsc_sgib(u32 origin)
+{
+	struct {
+		struct chsc_header request;
+		u16 op;
+		u8  reserved01[2];
+		u8  reserved02:4;
+		u8  fmt:4;
+		u8  reserved03[7];
+		/* operation data area begin */
+		u8  reserved04[4];
+		u32 gib_origin;
+		u8  reserved05[10];
+		u8  aix;
+		u8  reserved06[4029];
+		struct chsc_header response;
+		u8  reserved07[4];
+	} *sgib_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sgib_area = chsc_page;
+	sgib_area->request.length = 0x0fe0;
+	sgib_area->request.code = 0x0021;
+	sgib_area->op = 0x1;
+	sgib_area->gib_origin = origin;
+
+	ret = chsc(sgib_area);
+	if (ret == 0)
+		ret = chsc_error_from_response(sgib_area->response.code);
+	spin_unlock_irq(&chsc_page_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_sgib);
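Reviewer note: this new CHSC command is what ties the series together: kvm_s390_gib_init() hands the GIB's origin address to the channel subsystem so the machine knows where to queue alerts. The real implementation lands in the collapsed part of this merge; the following is only a condensed sketch of that bring-up flow, reconstructed from the interfaces visible above, with error paths and the airq registration trimmed:

/* Condensed, illustrative sketch of GIB bring-up (not the merged code). */
static struct kvm_s390_gib *gib;

int kvm_s390_gib_init(u8 nisc)
{
	/* The SIE control block holds 31-bit addresses, so the GIB page
	 * must come from DMA-capable memory, like sie_page2 above. */
	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!gib)
		return -ENOMEM;

	/* An adapter-interrupt handler on GAL_ISC (see the new IRQIO_GAL
	 * counter) would be registered here, before the GIB goes live. */

	gib->nisc = nisc;
	if (chsc_sgib((u32)(unsigned long)gib)) {
		/* register the GIB with the channel subsystem */
		free_page((unsigned long)gib);
		gib = NULL;
		return -EIO;
	}
	return 0;
}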
@@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
 	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_sgib(u32 origin);
 int chsc_error_from_response(int response);

 int chsc_siosl(struct subchannel_id schid);