Commit ff5dc149 authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: fix delivery of vector regs during machine checks

Vector registers are only to be stored if the facility is available
and if the guest has set up the machine check extended save area.

If anything goes wrong while writing the vector registers, the vector
registers are to be marked as invalid. Please note that we are allowed
to write the registers although they are marked as invalid.
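
(Illustration only, not part of the patch: a minimal user-space sketch of the rule above — store the vector registers only when everything is in place, and clear the MCIC validity bit whenever the store is skipped or fails. The bit layout and helper names below are hypothetical stand-ins for the kernel's union mci and write_guest_abs().)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's union mci: a 64-bit machine-check
 * interruption code with one named validity bit for the vector registers.
 * The bit position here is illustrative, not the architected one. */
union mcic {
	uint64_t val;
	struct {
		uint64_t pad_hi : 49;
		uint64_t vr     : 1;  /* 1 = stored vector registers are valid */
		uint64_t pad_lo : 14;
	} b;
};

/* Fake "store to guest save area": fails when no area was set up. */
static int store_vrs(void *save_area, const uint8_t *vrs, size_t len)
{
	if (!save_area)
		return -1;
	memcpy(save_area, vrs, len);
	return 0;
}

int main(void)
{
	uint8_t vrs[512] = { 0 };     /* 32 vector registers x 16 bytes */
	uint8_t area[512];
	union mcic mci = { .val = 0 };
	mci.b.vr = 1;

	void *ext_sa = area;          /* set to NULL to model a missing area */
	int facility_129 = 1;         /* vector facility installed? */

	/* Store only if the facility is there and an area is set up;
	 * on any failure, mark the vector registers as invalid. */
	if (!(facility_129 && ext_sa && store_vrs(ext_sa, vrs, sizeof(vrs)) == 0))
		mci.b.vr = 0;

	printf("vr valid: %u\n", (unsigned)mci.b.vr);
	return 0;
}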

Machine checks and "store status" SIGP orders are two different concepts;
let's separate them correctly. As the SIGP part is completely handled in
user space, we can drop it.

This patch is based on a patch from Cornelia Huck.
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Parent 0319dae6
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -25,6 +25,7 @@
 #include <asm/isc.h>
 #include <asm/gmap.h>
 #include <asm/switch_to.h>
+#include <asm/nmi.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -406,8 +407,10 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 {
 	unsigned long ext_sa_addr;
 	freg_t fprs[NUM_FPRS];
+	union mci mci;
 	int rc;
 
+	mci.val = mchk->mcic;
 	/* take care of lazy register loading via vcpu load/put */
 	save_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
@@ -415,7 +418,15 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	/* Extended save area */
 	rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
 			   sizeof(unsigned long));
-	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, ext_sa_addr);
+	/* Only bits 0-53 are used for address formation */
+	ext_sa_addr &= ~0x3ffUL;
+	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
+		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
+				    512))
+			mci.vr = 0;
+	} else {
+		mci.vr = 0;
+	}
 
 	/* General interruption information */
 	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
@@ -423,7 +434,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	rc |= put_guest_lc(vcpu, mchk->mcic, (u64 __user *) __LC_MCCK_CODE);
+	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
 
 	/* Register-save areas */
 	if (MACHINE_HAS_VX) {
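(An aside on the masking above, illustrative and not from the patch: "bits 0-53" in S/390 big-endian bit numbering are the 54 most significant bits of the 64-bit word, so clearing the 10 least significant bits with ~0x3ffUL yields a 1 KiB aligned address, and a result of zero doubles as "no extended save area set up".)

#include <stdio.h>

int main(void)
{
	/* A raw value as read from __LC_VX_SAVE_AREA_ADDR (made-up number). */
	unsigned long raw = 0x0000000012345abcUL;
	unsigned long addr = raw & ~0x3ffUL;   /* keep bits 0-53, drop 54-63 */

	printf("raw  = %#lx\n", raw);
	printf("addr = %#lx (1 KiB aligned: %s)\n",
	       addr, (addr % 1024 == 0) ? "yes" : "no");

	/* Any raw value below 1024 masks to 0: treated as "no save area". */
	printf("small value masks to %#lx\n", 0x3ffUL & ~0x3ffUL);
	return 0;
}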
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2837,38 +2837,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
-/*
- * store additional status at address
- */
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long gpa)
-{
-	/* Only bits 0-53 are used for address formation */
-	if (!(gpa & ~0x3ff))
-		return 0;
-
-	return write_guest_abs(vcpu, gpa & ~0x3ff,
-			       (void *)&vcpu->run->s.regs.vrs, 512);
-}
-
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	if (!test_kvm_facility(vcpu->kvm, 129))
-		return 0;
-
-	/*
-	 * The guest VXRS are in the host VXRs due to the lazy
-	 * copying in vcpu load/put. We can simply call save_fpu_regs()
-	 * to save the current register state because we are in the
-	 * middle of a load/put cycle.
-	 *
-	 * Let's update our copies before we save it into the save area.
-	 */
-	save_fpu_regs();
-	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
-}
-
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
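(The removed comment documents the lazy register handling that the new code in __write_machine_check() still depends on via save_fpu_regs(). A toy model of that pattern, with illustrative names only, not the kernel API:)

#include <stdio.h>
#include <string.h>

/* Toy model of lazy register copying between vcpu load/put: while the
 * vcpu is "loaded", the authoritative vector registers live in the
 * (simulated) hardware; the in-memory copy is stale until flushed. */
static unsigned char hw_vrs[512];      /* simulated hardware registers  */
static unsigned char mem_vrs[512];     /* in-memory copy of the regs    */

static void save_fpu_regs_model(void)  /* analogue of save_fpu_regs()   */
{
	memcpy(mem_vrs, hw_vrs, sizeof(mem_vrs));
}

static void store_to_save_area(unsigned char *area)
{
	/* Correct order: flush hardware state first, then store the copy. */
	save_fpu_regs_model();
	memcpy(area, mem_vrs, sizeof(mem_vrs));
}

int main(void)
{
	unsigned char save_area[512];
	hw_vrs[0] = 0x42;                /* guest modified a vector register */
	store_to_save_area(save_area);
	printf("stored byte: %#x\n", (unsigned)save_area[0]); /* 0x42, not stale 0 */
	return 0;
}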
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -273,10 +273,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);