/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

23
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
24
			u64 *reg)
25
{
26
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
27 28 29
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
30
		return SIGP_CC_NOT_OPERATIONAL;
31

32
	spin_lock(&fi->lock);
33
	if (fi->local_int[cpu_addr] == NULL)
34
		rc = SIGP_CC_NOT_OPERATIONAL;
35
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
36 37 38
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
39
		*reg &= 0xffffffff00000000UL;
40 41 42 43 44 45
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
46
		rc = SIGP_CC_STATUS_STORED;
47
	}
48
	spin_unlock(&fi->lock);
49 50 51 52 53 54 55

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

/*
 * Handle the SIGP EMERGENCY SIGNAL order: queue an emergency interrupt
 * for the addressed CPU and wake it if it is waiting.
 *
 * Returns SIGP_CC_NOT_OPERATIONAL for a bad address or missing target,
 * -ENOMEM on allocation failure, SIGP_CC_ORDER_CODE_ACCEPTED otherwise.
 */
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;	/* sender's CPU address */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

123 124 125 126 127 128 129 130
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
131
		return SIGP_CC_NOT_OPERATIONAL;
132 133 134 135 136 137 138 139 140 141 142

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
143
		rc = SIGP_CC_NOT_OPERATIONAL;
144 145 146 147 148 149 150
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
151 152
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
153
	spin_unlock_bh(&li->lock);
154
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
155
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
156
unlock:
157
	spin_unlock(&fi->lock);
158 159 160
	return rc;
}

161
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
162
{
163
	struct kvm_s390_interrupt_info *inti;
164
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
165

166
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
167 168 169 170 171
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
172 173
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
174 175
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
176
		goto out;
177
	}
178 179 180
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
181
	li->action_bits |= action;
182 183
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
184
out:
185
	spin_unlock_bh(&li->lock);
186

187
	return rc;
188 189 190 191 192 193 194 195 196
}

/*
 * Handle the SIGP STOP / STOP AND STORE STATUS orders for the addressed
 * CPU.  If the target was already stopped and a status store was
 * requested (__inject_sigp_stop returned -ESHUTDOWN), the status is
 * stored here, after all spinlocks have been dropped.
 */
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

/*
 * Handle the SIGP SET ARCHITECTURE order.  Mode 0 is rejected as not
 * operational; modes 1 and 2 invalidate every vcpu's pfault token and
 * clear its async page-fault completion queue before accepting the
 * order.  Any other mode yields -EOPNOTSUPP (handled in user space).
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
250
			     u64 *reg)
251
{
252 253
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
254
	struct kvm_s390_interrupt_info *inti;
255 256 257
	int rc;
	u8 tmp;

258 259 260 261 262 263
	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

264 265
	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
266 267
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
268
		*reg &= 0xffffffff00000000UL;
269
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
270
		return SIGP_CC_STATUS_STORED;
271 272 273 274
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
275
		return SIGP_CC_BUSY;
276 277 278

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
279
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
280 281
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
282
		rc = SIGP_CC_STATUS_STORED;
283 284 285 286 287 288 289 290 291
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
292 293
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
294
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
295 296 297 298 299 300 301

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}

302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

333
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
334
				u64 *reg)
335 336 337 338 339
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
340
		return SIGP_CC_NOT_OPERATIONAL;
341 342 343

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
344
		rc = SIGP_CC_NOT_OPERATIONAL;
345 346 347 348
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
349
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
350 351 352
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
353
			*reg |= SIGP_STATUS_NOT_RUNNING;
354
			rc = SIGP_CC_STATUS_STORED;
355 356 357 358 359 360 361 362 363 364
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

365 366
/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
367 368 369
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
370
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
371 372

	if (cpu_addr >= KVM_MAX_VCPUS)
373
		return SIGP_CC_NOT_OPERATIONAL;
374 375 376 377

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
378
		rc = SIGP_CC_NOT_OPERATIONAL;
379 380 381 382 383
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
384
		rc = SIGP_CC_BUSY;
385 386 387 388 389 390
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

391 392 393 394 395
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
396
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
397 398 399
	u8 order_code;
	int rc;

400 401
	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
402
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
403

404
	order_code = kvm_s390_get_base_disp_rs(vcpu);
405 406

	if (r1 % 2)
407
		parameter = vcpu->run->s.regs.gprs[r1];
408
	else
409
		parameter = vcpu->run->s.regs.gprs[r1 + 1];
410

411
	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
412 413 414 415
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
416
				  &vcpu->run->s.regs.gprs[r1]);
417
		break;
418 419 420 421
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
422
	case SIGP_EMERGENCY_SIGNAL:
423 424 425 426 427
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
428
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
429
		break;
430
	case SIGP_STOP_AND_STORE_STATUS:
431
		vcpu->stat.instruction_sigp_stop++;
432 433
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
434
		break;
435 436 437 438
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
439
	case SIGP_SET_ARCHITECTURE:
440 441 442 443 444 445
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
446
				       &vcpu->run->s.regs.gprs[r1]);
447
		break;
448 449 450 451
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
452 453 454
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
455
					  &vcpu->run->s.regs.gprs[r1]);
456
		break;
457 458 459 460 461
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
462 463
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
464 465 466 467 468 469 470 471 472
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
473
	default:
474
		return -EOPNOTSUPP;
475 476 477 478 479
	}

	if (rc < 0)
		return rc;

480
	kvm_s390_set_psw_cc(vcpu, rc);
481 482
	return 0;
}