/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

23
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
24
			u64 *reg)
25
{
26 27
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
28
	int rc;
29
	int ext_call_pending;
30

31 32 33
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
34 35
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
36 37
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
38
		*reg &= 0xffffffff00000000UL;
39
		if (ext_call_pending)
40
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
41
		if (cpuflags & CPUSTAT_STOPPED)
42
			*reg |= SIGP_STATUS_STOPPED;
43
		rc = SIGP_CC_STATUS_STORED;
44 45
	}

46 47
	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
48 49 50
	return rc;
}

51 52
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu)
53
{
54
	struct kvm_s390_irq irq = {
55
		.type = KVM_S390_INT_EMERGENCY,
56
		.u.emerg.code = vcpu->vcpu_id,
57 58
	};
	int rc = 0;
59

60
	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
61
	if (!rc)
62 63
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);
64

65
	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
66 67
}

/* SIGP EMERGENCY SIGNAL: unconditionally inject the emergency irq. */
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

73 74
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
75 76 77 78 79 80 81 82 83 84 85 86
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

87
	/* Inject the emergency signal? */
88 89 90 91
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
92
		return __inject_sigp_emergency(vcpu, dst_vcpu);
93 94 95 96 97 98 99
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

100
static int __sigp_external_call(struct kvm_vcpu *vcpu,
101
				struct kvm_vcpu *dst_vcpu, u64 *reg)
102
{
103
	struct kvm_s390_irq irq = {
104
		.type = KVM_S390_INT_EXTERNAL_CALL,
105
		.u.extcall.code = vcpu->vcpu_id,
106 107
	};
	int rc;
108

109
	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
110 111 112 113 114
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
115 116
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
117
	}
118

119
	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
120 121
}

122
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
123
{
124 125 126
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
127 128
	int rc;

129 130 131 132 133 134
	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);
135

136 137 138 139 140 141
	return rc;
}

/*
 * SIGP STOP AND STORE STATUS: like SIGP STOP, but also request that the
 * destination cpu's status be stored when it stops.
 */
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

/*
 * SIGP SET ARCHITECTURE: handle an architecture mode switch request.
 *
 * Mode 0 is rejected as not operational; modes 1 and 2 invalidate all
 * vcpus' pfault tokens and flush their async pagefault queues.  Other
 * modes are left for user space (-EOPNOTSUPP).
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

183 184
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
185
{
186 187 188 189
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
190 191
	int rc;

192 193 194 195 196
	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
197
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
198
		*reg &= 0xffffffff00000000UL;
199
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
200
		return SIGP_CC_STATUS_STORED;
201 202
	}

203 204
	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
205 206
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
207
		return SIGP_CC_STATUS_STORED;
208 209 210 211 212
	}

	return rc;
}

213 214 215
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

237 238
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
239
{
240
	struct kvm_s390_local_interrupt *li;
241 242
	int rc;

243 244 245 246 247 248 249 250 251
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
252 253
	}

254 255
	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);
256 257 258 259

	return rc;
}

260 261
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
262
{
263
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
264 265
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;
266

267
	/* make sure we don't race with STOP irq injection */
268
	spin_lock(&li->lock);
269
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
270
		rc = SIGP_CC_BUSY;
271
	spin_unlock(&li->lock);
272

273 274 275
	return rc;
}

276 277 278 279 280 281 282 283 284 285 286 287 288 289
static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

/* Unknown SIGP orders are forwarded to user space. */
static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

290 291
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
292 293
{
	int rc;
294 295 296 297 298 299 300 301
	struct kvm_vcpu *dst_vcpu;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
302 303 304 305

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
306
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
307
		break;
308 309
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
310
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
311
		break;
312
	case SIGP_EMERGENCY_SIGNAL:
313
		vcpu->stat.instruction_sigp_emergency++;
314
		rc = __sigp_emergency(vcpu, dst_vcpu);
315 316 317
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
318
		rc = __sigp_stop(vcpu, dst_vcpu);
319
		break;
320
	case SIGP_STOP_AND_STORE_STATUS:
321
		vcpu->stat.instruction_sigp_stop_store_status++;
322
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
323
		break;
324
	case SIGP_STORE_STATUS_AT_ADDRESS:
325
		vcpu->stat.instruction_sigp_store_status++;
326
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
327
						 status_reg);
328 329 330
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
331
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
332
		break;
333
	case SIGP_COND_EMERGENCY_SIGNAL:
334
		vcpu->stat.instruction_sigp_cond_emergency++;
335
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
336
						  status_reg);
337
		break;
338 339
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
340
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
341
		break;
342
	case SIGP_START:
343
		vcpu->stat.instruction_sigp_start++;
344
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
345
		break;
346 347
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
348 349 350
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
351
		vcpu->stat.instruction_sigp_init_cpu_reset++;
352 353 354
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
355
		vcpu->stat.instruction_sigp_cpu_reset++;
356
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
357
		break;
358
	default:
359
		vcpu->stat.instruction_sigp_unknown++;
360
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
361 362
	}

363 364 365 366 367
	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

368 369 370
	return rc;
}

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
393 394 395
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}

	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
		   order_code);

	return 1;
}

421 422 423 424 425 426 427 428 429 430 431 432 433
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

434
	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
435 436
	if (handle_sigp_order_in_user_space(vcpu, order_code))
		return -EOPNOTSUPP;
437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
453 454 455 456 457
	}

	if (rc < 0)
		return rc;

458
	kvm_s390_set_psw_cc(vcpu, rc);
459 460
	return 0;
}
461 462 463 464 465 466 467 468 469 470 471 472 473 474 475

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occurr after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
476
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
477 478 479 480 481 482 483

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

484
		kvm_s390_vcpu_wakeup(dest_vcpu);
485 486 487 488 489 490
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}