Commit 3d95c7d2 authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: sigp: move target cpu checks into dispatcher

All SIGP orders targeting a single VCPU have to verify that the target is
valid and available.

Let's move this check from the individual handler functions into the
dispatcher. The destination VCPU is now passed directly as a pointer instead
of the CPU address of the target.

Please note that all SIGP orders except SIGP SET ARCHITECTURE - even unknown
ones - will now check for the availability of the target VCPU. This is what the
architecture documentation specifies.
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Parent 3526a66b
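To make the shape of the change easier to see before reading the hunks, here is a minimal, self-contained C sketch of the pattern the patch adopts. The types and names are toy stand-ins, not the kernel's; only the dispatcher/handler split mirrors the diff below: the target lookup and availability check run once in the dispatcher, and every handler receives an already-validated pointer.

/*
 * Toy model of the refactoring (not kernel code): the target-validity
 * check runs once in the dispatcher, so handlers can assume a valid,
 * available target and take a pointer instead of a raw CPU address.
 */
#include <stdio.h>

#define MAX_VCPUS   4
#define CC_ACCEPTED 0	/* stands in for SIGP_CC_ORDER_CODE_ACCEPTED */
#define CC_NOT_OPER 3	/* stands in for SIGP_CC_NOT_OPERATIONAL */

struct vcpu { int id; int available; };

static struct vcpu vcpus[MAX_VCPUS] = {
	{ 0, 1 }, { 1, 1 }, { 2, 0 }, { 3, 1 },
};

/* Handler no longer re-checks the target; it gets a non-NULL pointer. */
static int handle_sense(struct vcpu *dst)
{
	printf("sense on vcpu %d\n", dst->id);
	return CC_ACCEPTED;
}

static int dispatch(unsigned int cpu_addr, int order)
{
	struct vcpu *dst;

	/* Single, shared validity/availability check. */
	if (cpu_addr >= MAX_VCPUS || !vcpus[cpu_addr].available)
		return CC_NOT_OPER;
	dst = &vcpus[cpu_addr];

	switch (order) {
	case 0:
		return handle_sense(dst);
	default:
		return CC_NOT_OPER;	/* toy: unknown orders just fail */
	}
}

int main(void)
{
	printf("cc=%d\n", dispatch(1, 0));	/* accepted */
	printf("cc=%d\n", dispatch(2, 0));	/* not operational */
	return 0;
}

Because the check precedes the switch, even an unknown order code fails with "not operational" when the target is invalid, which matches the behavior the commit message calls out for all orders except SIGP SET ARCHITECTURE.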
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,20 +20,13 @@
 #include "kvm-s390.h"
 #include "trace.h"
 
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 			u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int cpuflags;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 
 	cpuflags = atomic_read(li->cpuflags);
@@ -48,44 +41,36 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		rc = SIGP_CC_STATUS_STORED;
 	}
 
-	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
+		   rc);
 	return rc;
 }
 
-static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
 	struct kvm_s390_interrupt s390int = {
 		.type = KVM_S390_INT_EMERGENCY,
 		.parm = vcpu->vcpu_id,
 	};
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc = 0;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
 	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
 	if (!rc)
-		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
+					struct kvm_vcpu *dst_vcpu,
 					u16 asn, u64 *reg)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
 	u16 p_asn, s_asn;
 	psw_t *psw;
 	u32 flags;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
 	psw = &dst_vcpu->arch.sie_block->gpsw;
 	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
@@ -96,7 +81,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	    || (psw->mask & psw_int_mask) != psw_int_mask
 	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
 	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
-		return __sigp_emergency(vcpu, cpu_addr);
+		return __sigp_emergency(vcpu, dst_vcpu);
 	} else {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -104,23 +89,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	}
 }
 
-static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __sigp_external_call(struct kvm_vcpu *vcpu,
+				struct kvm_vcpu *dst_vcpu)
 {
 	struct kvm_s390_interrupt s390int = {
 		.type = KVM_S390_INT_EXTERNAL_CALL,
 		.parm = vcpu->vcpu_id,
 	};
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
 	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
 	if (!rc)
-		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
+			   dst_vcpu->vcpu_id);
 
 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
@@ -160,21 +141,13 @@ static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 	return rc;
 }
 
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
+		       int action)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
 	rc = __inject_sigp_stop(dst_vcpu, action);
-
-	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
 
 	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
 		/* If the CPU has already been stopped, we still have
@@ -212,18 +185,13 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 	return rc;
 }
 
-static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-			     u64 *reg)
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
+			     u32 address, u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
 	struct kvm_s390_interrupt_info *inti;
 	int rc;
 
-	if (cpu_addr < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 
 	/*
@@ -260,24 +228,20 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	kvm_s390_vcpu_wakeup(dst_vcpu);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
-	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
+		   address);
 out_li:
 	spin_unlock(&li->lock);
 	return rc;
 }
 
-static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
-				       u32 addr, u64 *reg)
+static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
+				       struct kvm_vcpu *dst_vcpu,
+				       u32 addr, u64 *reg)
 {
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int flags;
 	int rc;
 
-	if (cpu_id < KVM_MAX_VCPUS)
-		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-
 	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
 	spin_unlock(&dst_vcpu->arch.local_int.lock);
@@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 	return rc;
 }
 
-static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
-				u64 *reg)
+static int __sigp_sense_running(struct kvm_vcpu *vcpu,
+				struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
 	struct kvm_s390_local_interrupt *li;
-	struct kvm_vcpu *dst_vcpu = NULL;
 	int rc;
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
 	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
 		/* running */
@@ -321,26 +278,18 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		rc = SIGP_CC_STATUS_STORED;
 	}
 
-	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
-		   rc);
+	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
+		   dst_vcpu->vcpu_id, rc);
 
 	return rc;
 }
 
 /* Test whether the destination CPU is available and not busy */
-static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int sigp_check_callable(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
-	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-	struct kvm_vcpu *dst_vcpu = NULL;
-
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-	if (!dst_vcpu)
-		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;
 
 	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
@@ -353,53 +302,61 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 		   u16 cpu_addr, u32 parameter, u64 *status_reg)
 {
 	int rc;
+	struct kvm_vcpu *dst_vcpu;
+
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return SIGP_CC_NOT_OPERATIONAL;
+
+	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+	if (!dst_vcpu)
+		return SIGP_CC_NOT_OPERATIONAL;
 
 	switch (order_code) {
 	case SIGP_SENSE:
 		vcpu->stat.instruction_sigp_sense++;
-		rc = __sigp_sense(vcpu, cpu_addr, status_reg);
+		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_EXTERNAL_CALL:
 		vcpu->stat.instruction_sigp_external_call++;
-		rc = __sigp_external_call(vcpu, cpu_addr);
+		rc = __sigp_external_call(vcpu, dst_vcpu);
 		break;
 	case SIGP_EMERGENCY_SIGNAL:
 		vcpu->stat.instruction_sigp_emergency++;
-		rc = __sigp_emergency(vcpu, cpu_addr);
+		rc = __sigp_emergency(vcpu, dst_vcpu);
 		break;
 	case SIGP_STOP:
 		vcpu->stat.instruction_sigp_stop++;
-		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
+		rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STOP_ON_STOP);
 		break;
 	case SIGP_STOP_AND_STORE_STATUS:
 		vcpu->stat.instruction_sigp_stop++;
-		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
+		rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STORE_ON_STOP |
 				 ACTION_STOP_ON_STOP);
 		break;
 	case SIGP_STORE_STATUS_AT_ADDRESS:
-		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
+		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
 						 status_reg);
 		break;
 	case SIGP_SET_PREFIX:
 		vcpu->stat.instruction_sigp_prefix++;
-		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, status_reg);
+		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
 		break;
 	case SIGP_COND_EMERGENCY_SIGNAL:
-		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
+		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
 						  status_reg);
 		break;
 	case SIGP_SENSE_RUNNING:
 		vcpu->stat.instruction_sigp_sense_running++;
-		rc = __sigp_sense_running(vcpu, cpu_addr, status_reg);
+		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_START:
-		rc = sigp_check_callable(vcpu, cpu_addr);
+		rc = sigp_check_callable(vcpu, dst_vcpu);
 		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
 			rc = -EOPNOTSUPP;	/* Handle START in user space */
 		break;
	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
-		rc = sigp_check_callable(vcpu, cpu_addr);
+		rc = sigp_check_callable(vcpu, dst_vcpu);
 		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
 			VCPU_EVENT(vcpu, 4,
 				   "sigp restart %x to handle userspace",
......