提交 db4a29cb 编写于 作者: Heiko Carstens 提交者: Gleb Natapov

KVM: s390: fix and enforce return code handling for irq injections

kvm_s390_inject_program_int() and friends may fail if no memory is available.
This must be reported to the calling functions, so that this gets passed
down to user space which should fix the situation.
Alternatively we end up with guest state corruption.

So fix this and enforce return value checking by adding a __must_check
annotation to all of these function prototypes.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
上级 3736b874
...@@ -45,10 +45,8 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) ...@@ -45,10 +45,8 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
do { do {
rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg], rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
(u64 __user *) useraddr); (u64 __user *) useraddr);
if (rc) { if (rc)
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break;
}
useraddr += 8; useraddr += 8;
if (reg == reg3) if (reg == reg3)
break; break;
...@@ -79,10 +77,8 @@ static int handle_lctl(struct kvm_vcpu *vcpu) ...@@ -79,10 +77,8 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
reg = reg1; reg = reg1;
do { do {
rc = get_guest(vcpu, val, (u32 __user *) useraddr); rc = get_guest(vcpu, val, (u32 __user *) useraddr);
if (rc) { if (rc)
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
break;
}
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
vcpu->arch.sie_block->gcr[reg] |= val; vcpu->arch.sie_block->gcr[reg] |= val;
useraddr += 4; useraddr += 4;
......
...@@ -633,8 +633,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) ...@@ -633,8 +633,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
} else { } else {
VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
trace_kvm_s390_sie_fault(vcpu); trace_kvm_s390_sie_fault(vcpu);
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
rc = 0;
} }
} }
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
......
...@@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); ...@@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
void kvm_s390_tasklet(unsigned long parm); void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
int kvm_s390_inject_vm(struct kvm *kvm, int __must_check kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int); struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt *s390int); struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); int __must_check kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid); u64 cr6, u64 schid);
......
...@@ -36,31 +36,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) ...@@ -36,31 +36,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu); operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */ /* must be word boundary */
if (operand2 & 3) { if (operand2 & 3)
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
goto out;
}
/* get the value */ /* get the value */
if (get_guest(vcpu, address, (u32 __user *) operand2)) { if (get_guest(vcpu, address, (u32 __user *) operand2))
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out;
}
address = address & 0x7fffe000u; address = address & 0x7fffe000u;
/* make sure that the new value is valid memory */ /* make sure that the new value is valid memory */
if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
(copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out;
}
kvm_s390_set_prefix(vcpu, address); kvm_s390_set_prefix(vcpu, address);
VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
trace_kvm_s390_handle_prefix(vcpu, 1, address); trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
return 0; return 0;
} }
...@@ -74,49 +67,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) ...@@ -74,49 +67,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu); operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */ /* must be word boundary */
if (operand2 & 3) { if (operand2 & 3)
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
goto out;
}
address = vcpu->arch.sie_block->prefix; address = vcpu->arch.sie_block->prefix;
address = address & 0x7fffe000u; address = address & 0x7fffe000u;
/* get the value */ /* get the value */
if (put_guest(vcpu, address, (u32 __user *)operand2)) { if (put_guest(vcpu, address, (u32 __user *)operand2))
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out;
}
VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
trace_kvm_s390_handle_prefix(vcpu, 0, address); trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
return 0; return 0;
} }
static int handle_store_cpu_address(struct kvm_vcpu *vcpu) static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{ {
u64 useraddr; u64 useraddr;
int rc;
vcpu->stat.instruction_stap++; vcpu->stat.instruction_stap++;
useraddr = kvm_s390_get_base_disp_s(vcpu); useraddr = kvm_s390_get_base_disp_s(vcpu);
if (useraddr & 1) { if (useraddr & 1)
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
goto out;
}
rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr); if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
if (rc) { return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out;
}
VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
trace_kvm_s390_handle_stap(vcpu, useraddr); trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
return 0; return 0;
} }
...@@ -135,10 +116,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu) ...@@ -135,10 +116,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
int cc; int cc;
addr = kvm_s390_get_base_disp_s(vcpu); addr = kvm_s390_get_base_disp_s(vcpu);
if (addr & 3) { if (addr & 3)
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
goto out;
}
cc = 0; cc = 0;
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
if (!inti) if (!inti)
...@@ -167,7 +146,6 @@ static int handle_tpi(struct kvm_vcpu *vcpu) ...@@ -167,7 +146,6 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
/* Set condition code and we're done. */ /* Set condition code and we're done. */
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
out:
return 0; return 0;
} }
...@@ -237,12 +215,9 @@ static int handle_stfl(struct kvm_vcpu *vcpu) ...@@ -237,12 +215,9 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list)); &facility_list, sizeof(facility_list));
if (rc) if (rc)
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
else { VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
VCPU_EVENT(vcpu, 5, "store facility list value %x", trace_kvm_s390_handle_stfl(vcpu, facility_list);
facility_list);
trace_kvm_s390_handle_stfl(vcpu, facility_list);
}
return 0; return 0;
} }
...@@ -317,25 +292,18 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) ...@@ -317,25 +292,18 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
static int handle_stidp(struct kvm_vcpu *vcpu) static int handle_stidp(struct kvm_vcpu *vcpu)
{ {
u64 operand2; u64 operand2;
int rc;
vcpu->stat.instruction_stidp++; vcpu->stat.instruction_stidp++;
operand2 = kvm_s390_get_base_disp_s(vcpu); operand2 = kvm_s390_get_base_disp_s(vcpu);
if (operand2 & 7) { if (operand2 & 7)
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
goto out;
}
rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2); if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
if (rc) { return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out;
}
VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
return 0; return 0;
} }
...@@ -377,6 +345,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) ...@@ -377,6 +345,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
u64 operand2; u64 operand2;
unsigned long mem; unsigned long mem;
int rc = 0;
vcpu->stat.instruction_stsi++; vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
...@@ -412,7 +381,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) ...@@ -412,7 +381,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
} }
if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
goto out_mem; goto out_mem;
} }
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
...@@ -425,7 +394,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) ...@@ -425,7 +394,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
out_fail: out_fail:
/* condition code 3 */ /* condition code 3 */
vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
return 0; return rc;
} }
static const intercept_handler_t b2_handlers[256] = { static const intercept_handler_t b2_handlers[256] = {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册