diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 84d6dc6f938a85a513f8e3b935eca17003993faa..c0ef625b6765331a5619d5765508752688463cbf 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <asm/nmi.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -406,8 +407,10 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 {
 	unsigned long ext_sa_addr;
 	freg_t fprs[NUM_FPRS];
+	union mci mci;
 	int rc;
 
+	mci.val = mchk->mcic;
 	/* take care of lazy register loading via vcpu load/put */
 	save_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
@@ -415,7 +418,15 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	/* Extended save area */
 	rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
 			   sizeof(unsigned long));
-	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, ext_sa_addr);
+	/* Only bits 0-53 are used for address formation */
+	ext_sa_addr &= ~0x3ffUL;
+	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
+		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
+				    512))
+			mci.vr = 0;
+	} else {
+		mci.vr = 0;
+	}
 
 	/* General interruption information */
 	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
@@ -423,7 +434,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 			   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	rc |= put_guest_lc(vcpu, mchk->mcic, (u64 __user *) __LC_MCCK_CODE);
+	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
 
 	/* Register-save areas */
 	if (MACHINE_HAS_VX) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f142215ed30dd1dc4ae7b14d57d72732a04b82b7..dead20ae9c00e378884d330811b534bd23d54784 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2837,38 +2837,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
-/*
- * store additional status at address
- */
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long gpa)
-{
-	/* Only bits 0-53 are used for address formation */
-	if (!(gpa & ~0x3ff))
-		return 0;
-
-	return write_guest_abs(vcpu, gpa & ~0x3ff,
-			       (void *)&vcpu->run->s.regs.vrs, 512);
-}
-
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	if (!test_kvm_facility(vcpu->kvm, 129))
-		return 0;
-
-	/*
-	 * The guest VXRS are in the host VXRs due to the lazy
-	 * copying in vcpu load/put. We can simply call save_fpu_regs()
-	 * to save the current register state because we are in the
-	 * middle of a load/put cycle.
-	 *
-	 * Let's update our copies before we save it into the save area.
-	 */
-	save_fpu_regs();
-
-	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
-}
-
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index b8432862a81715761fbe274071d0988021b61a54..c3d4f16e8e7b24d2c0c9b1f8cc9f82367d97f43c 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -273,10 +273,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
-					unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
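The following is a minimal, standalone C sketch (not part of the patch) of the decision the new code in __write_machine_check() makes: the 512-byte vector save area is written only when the machine-check interruption code already marks the vector registers valid, the guest supplied a usable save-area address and facility 129 (the vector facility) is offered to the guest; otherwise the VR validity bit is cleared before the MCIC is stored at __LC_MCCK_CODE. Names are placeholders: write_guest_512() is a hypothetical stand-in for write_guest_abs(), the bool parameters replace the union mci bitfield and test_kvm_facility(), and the sketch omits the additional check that reading __LC_VX_SAVE_AREA_ADDR from the lowcore succeeded.

#include <stdint.h>
#include <stdbool.h>

/* hypothetical stand-in for write_guest_abs(); always succeeds here */
static int write_guest_512(uint64_t gpa, const void *buf)
{
	(void)gpa;
	(void)buf;
	return 0;
}

/*
 * Returns true if the vector-register validity bit (mci.vr) may stay
 * set, i.e. the 32 vector registers (32 * 16 = 512 bytes) were stored
 * into the guest's machine-check extended save area.
 */
static bool store_vector_regs(uint64_t ext_sa_addr, bool vr_valid,
			      bool has_facility_129, const void *vrs)
{
	/* only bits 0-53 take part in address formation */
	ext_sa_addr &= ~0x3ffULL;

	if (!vr_valid || !ext_sa_addr || !has_facility_129)
		return false;		/* caller clears mci.vr */

	if (write_guest_512(ext_sa_addr, vrs))
		return false;		/* store failed: clear mci.vr */

	return true;			/* mci.vr remains set */
}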