/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

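/*
 * SIGP SENSE: report the addressed CPU's state. The order is accepted if
 * the target is neither stopped nor has an external call pending;
 * otherwise the status bits are stored in the caller's register.
 */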
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

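/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt on the
 * target VCPU's local interrupt list and wake it up.
 */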
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

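/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only if
 * the target's state, PSW and address-space numbers (ASNs) satisfy the
 * architected delivery conditions; otherwise report
 * SIGP_STATUS_INCORRECT_STATE.
 */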
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

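/*
 * SIGP EXTERNAL CALL: queue an external-call interrupt on the target VCPU,
 * recording the calling CPU's id as the source.
 */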
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

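/*
 * Queue a stop interrupt with the given action bits on a local interrupt
 * struct. Returns -ESHUTDOWN if the CPU is already stopped and a
 * store-on-stop was requested, so the caller can perform the status store.
 */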
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

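/*
 * SIGP STOP / STOP AND STORE STATUS: inject a stop interrupt and, if the
 * target was already stopped, perform the status store here, after all
 * spinlocks have been dropped.
 */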
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

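/*
 * SIGP SET ARCHITECTURE: parameter modes 1 and 2 are accepted; mode 0 is
 * reported not operational, anything else is unsupported.
 */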
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

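/*
 * SIGP SET PREFIX: validate that the new prefix area is backed by guest
 * memory, then queue a set-prefix interrupt; the target CPU must be in
 * the stopped state.
 */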
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}

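/*
 * SIGP STORE STATUS AT ADDRESS: store the stopped target's status at the
 * given 512-byte aligned absolute address.
 */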
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

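/* SIGP SENSE RUNNING: report whether the addressed CPU is currently running. */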
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
			*reg |= SIGP_STATUS_NOT_RUNNING;
			rc = SIGP_CC_STATUS_STORED;
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

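/*
 * SIGP RESTART is completed in userspace; report busy while a stop is
 * still pending on the target.
 */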
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	else
		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
			cpu_addr);
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

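/*
 * Top-level handler for the SIGP intercept: decode the order code, CPU
 * address and parameter from the guest registers, dispatch to the helper
 * for the order and set the resulting condition code in the guest PSW.
 */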
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == SIGP_CC_BUSY)
			break;
		/* user space must know about restart */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}