/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
24
			u64 *reg)
25
{
26
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
27 28 29
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
30
		return SIGP_CC_NOT_OPERATIONAL;
31

32
	spin_lock(&fi->lock);
33
	if (fi->local_int[cpu_addr] == NULL)
34
		rc = SIGP_CC_NOT_OPERATIONAL;
35
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
36 37 38
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
39
		*reg &= 0xffffffff00000000UL;
40 41 42 43 44 45
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
46
		rc = SIGP_CC_STATUS_STORED;
47
	}
48
	spin_unlock(&fi->lock);
49 50 51 52 53 54 55

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

/*
 * SIGP EMERGENCY SIGNAL: queue an emergency-signal external interrupt
 * for the addressed CPU and wake it if it is waiting.
 *
 * Returns a SIGP condition code, or -ENOMEM if the interrupt descriptor
 * could not be allocated.
 */
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;	/* sending CPU's address */

	/* fi->lock keeps the local_int array stable while we use it */
	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);	/* nobody took ownership of the interrupt */
		goto unlock;
	}
	/* queue the interrupt; ownership of inti passes to the target */
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	/* kick the target in case it is sleeping on its wait queue */
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only
 * when the target CPU's state satisfies the architected conditions;
 * otherwise store an "incorrect state" status in *reg.
 *
 * NOTE(review): the exact order and sense of the condition terms below
 * mirrors the architected check list — confirm any change against the
 * Principles of Operation before touching it.
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		/* conditions not met: report incorrect state to the caller */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

/*
 * SIGP EXTERNAL CALL: queue an external-call interrupt for the addressed
 * CPU and wake it if it is waiting. Same shape as __sigp_emergency, but
 * with the external-call interrupt type.
 *
 * Returns a SIGP condition code, or -ENOMEM on allocation failure.
 */
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;	/* calling CPU's address */

	/* fi->lock keeps the local_int array stable while we use it */
	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);	/* nobody took ownership of the interrupt */
		goto unlock;
	}
	/* queue the interrupt; ownership of inti passes to the target */
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	/* kick the target in case it is sleeping on its wait queue */
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

/*
 * Queue a stop interrupt on @li and record the requested @action
 * (ACTION_* bits) for the stop handler.
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED on success, -ENOMEM when the
 * interrupt descriptor cannot be allocated, or -ESHUTDOWN when the CPU
 * is already stopped and ACTION_STORE_ON_STOP was requested — the
 * caller (__sigp_stop) then performs the status store itself, outside
 * of all spinlocks.
 */
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	/* GFP_ATOMIC: callers hold fi->lock, so we must not sleep here */
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* already stopped: drop the unused interrupt descriptor */
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;	/* tell caller to do the store */
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	/* wake the target so it notices the pending stop */
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

/*
 * SIGP STOP / STOP AND STORE STATUS: inject a stop request into the
 * addressed CPU. For stop-and-store on an already-stopped CPU the
 * status store is performed here, after all spinlocks are released.
 *
 * Returns a SIGP condition code or a negative error from the injection
 * or the status store.
 */
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	/* fi->lock keeps the local_int array stable while we use it */
	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

/*
 * SIGP SET ARCHITECTURE: mode 0 is rejected as not operational; modes 1
 * and 2 are accepted after invalidating every VCPU's pfault token and
 * flushing its async page-fault completion queue. Any other mode is
 * passed to user space via -EOPNOTSUPP.
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	unsigned int id;
	struct kvm_vcpu *dst;
	u32 mode = parameter & 0xff;	/* only the low byte selects the mode */

	if (mode == 0)
		return SIGP_CC_NOT_OPERATIONAL;
	if (mode != 1 && mode != 2)
		return -EOPNOTSUPP;

	kvm_for_each_vcpu(id, dst, vcpu->kvm) {
		dst->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
		kvm_clear_async_pf_completion_queue(dst);
	}

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

/*
 * SIGP SET PREFIX: queue a set-prefix interrupt that will move the
 * target CPU's prefix area to @address. The target must exist and be in
 * the stopped state, and both pages of the new prefix area must be
 * backed by valid guest memory; otherwise a status is stored in *reg.
 *
 * Returns a SIGP condition code (SIGP_CC_BUSY when the interrupt
 * descriptor cannot be allocated).
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	/* probe both 4k pages of the 8k prefix area */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	/* fi->lock keeps the local_int array stable while we use it */
	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		/*
		 * NOTE(review): an unknown CPU address yields a stored
		 * "incorrect state" status here rather than CC3 (not
		 * operational) — confirm against the architecture.
		 */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	/* queue the interrupt; ownership of inti passes to the target */
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}

/*
 * SIGP STORE STATUS AT ADDRESS: store the (stopped) target CPU's status
 * at the 512-byte-aligned guest address in @addr. Failure reasons are
 * reported as a stored status in *reg.
 */
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	/* snapshot the target's cpuflags under its local-interrupt lock */
	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	/* status can only be stored while the target is stopped */
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;	/* force 512-byte alignment of the save area */
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		/* address was not backed by guest memory */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

341
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
342
				u64 *reg)
343 344 345 346 347
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
348
		return SIGP_CC_NOT_OPERATIONAL;
349 350 351

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
352
		rc = SIGP_CC_NOT_OPERATIONAL;
353 354 355 356
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
357
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
358 359 360
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
361
			*reg |= SIGP_STATUS_NOT_RUNNING;
362
			rc = SIGP_CC_STATUS_STORED;
363 364 365 366 367 368 369 370 371 372
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

373 374
/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
375 376 377
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
378
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
379 380

	if (cpu_addr >= KVM_MAX_VCPUS)
381
		return SIGP_CC_NOT_OPERATIONAL;
382 383 384 385

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
386
		rc = SIGP_CC_NOT_OPERATIONAL;
387 388 389 390 391
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
392
		rc = SIGP_CC_BUSY;
393 394 395 396 397 398
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

/*
 * Intercept handler for the SIGP instruction: decode the order code,
 * CPU address and parameter from the guest registers and dispatch to
 * the per-order helpers.
 *
 * Returns 0 after setting the PSW condition code from the helper's
 * result, a negative error to be handled by the caller, or -EOPNOTSUPP
 * for orders that must be completed in user space.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* R1/R3 fields come from the instruction's IPA halfword */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* the parameter lives in the odd register of the R1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* a non-negative rc is the SIGP condition code for the guest */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}