/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>

#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

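/*
 * SIGP SENSE: report the status of the addressed CPU. The order is
 * accepted outright unless an external call is pending or the CPU is
 * stopped, in which case the status bits are stored in the caller's
 * register.
 */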
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

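/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt,
 * tagged with the sender's CPU address, on the target CPU's local
 * interrupt list and wake the target if it is waiting.
 */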
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

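/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal if
 * the target CPU is not stopped, has I/O or external interrupts
 * masked, is in a wait state at a non-zero PSW address, or is running
 * in the address space identified by asn; otherwise
 * SIGP_STATUS_INCORRECT_STATE is stored.
 */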
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

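/*
 * SIGP EXTERNAL CALL: queue an external call interrupt for the target
 * CPU, tagged with the sender's CPU address, and wake the target.
 */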
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

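/*
 * Queue a stop interrupt on the given local interrupt structure. If
 * the CPU is already stopped, -ESHUTDOWN tells the caller that a
 * requested store-status still has to be performed.
 */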
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

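/*
 * SIGP STOP / SIGP STOP AND STORE STATUS: inject a stop interrupt
 * into the target CPU; for stop-and-store, the status of an already
 * stopped CPU is stored here, after all spinlocks have been dropped.
 */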
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	rc = __inject_sigp_stop(li, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

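/*
 * SIGP SET ARCHITECTURE: on a mode switch request, invalidate all
 * pfault tokens and cancel each VCPU's outstanding async page faults.
 */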
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

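/*
 * SIGP SET PREFIX: check that the requested prefix area is backed by
 * memory, then queue a set-prefix interrupt for the target CPU, which
 * must be in the stopped state.
 */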
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}

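/*
 * SIGP STORE STATUS AT ADDRESS: store the status of the stopped
 * target CPU at the given absolute address, aligned down to a
 * 512-byte boundary.
 */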
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

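/*
 * SIGP SENSE RUNNING STATUS: accept the order if the target CPU is
 * currently running, otherwise store SIGP_STATUS_NOT_RUNNING.
 */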
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock_bh(&li->lock);

	return rc;
}

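/*
 * Top-level handler for intercepted SIGP instructions: decode order
 * code, CPU address and parameter from the guest registers, dispatch
 * to the order-specific helper, and fold a non-negative result into
 * the guest PSW condition code. Orders that must be completed in user
 * space return -EOPNOTSUPP.
 */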
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		spin_lock_bh(&dest_vcpu->arch.local_int.lock);
		if (waitqueue_active(&dest_vcpu->wq))
			wake_up_interruptible(&dest_vcpu->wq);
		dest_vcpu->preempted = true;
		spin_unlock_bh(&dest_vcpu->arch.local_int.lock);

		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}