/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

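/*
 * SIGP SENSE: report the status of the addressed CPU. CC 0 is returned
 * when the CPU is neither stopped nor has an external call pending;
 * otherwise the status bits are stored into the caller's register and
 * CC 1 (status stored) is returned.
 */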
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

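/*
 * SIGP EMERGENCY SIGNAL: queue an emergency interrupt, tagged with the
 * sending CPU's address, on the destination CPU and wake it up.
 */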
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

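/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only
 * if one of the conditions checked below holds (target not stopped,
 * disabled for I/O or external interrupts, waiting with a non-zero PSW
 * address, or running in the given address space); otherwise store
 * SIGP_STATUS_INCORRECT_STATE.
 */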
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

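/*
 * SIGP EXTERNAL CALL: queue an external call interrupt, carrying the
 * sending CPU's address, on the destination CPU and wake it up.
 */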
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

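/*
 * Queue a stop interrupt on @li and record the requested follow-up
 * action bits. If the CPU is already stopped, nothing is queued and
 * -ESHUTDOWN is returned for stop-and-store so the caller can store
 * the status itself.
 */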
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

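/*
 * SIGP STOP / STOP AND STORE STATUS: inject a stop interrupt into the
 * destination CPU. For an already stopped CPU, stop-and-store still has
 * to store the status, which is done here after all locks are dropped.
 */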
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	rc = __inject_sigp_stop(li, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

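/*
 * SIGP SET ARCHITECTURE: mode 0 (presumably the switch back to ESA/390
 * mode) is reported as not operational; modes 1 and 2 (z/Architecture)
 * invalidate the pfault tokens of all VCPUs and are accepted; any other
 * mode is rejected with -EOPNOTSUPP.
 */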
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

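/*
 * SIGP SET PREFIX: check that the new prefix area (two pages) is backed
 * by accessible guest memory and that the target CPU is stopped, then
 * queue a set-prefix interrupt on it.
 */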
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}

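/*
 * SIGP STORE STATUS AT ADDRESS: the target CPU must be stopped; its
 * status is stored at the given 512-byte aligned absolute address.
 * An inaccessible address is reported as an invalid parameter.
 */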
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

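/*
 * SIGP SENSE RUNNING: report whether the addressed CPU is currently
 * running (CC 0) or store SIGP_STATUS_NOT_RUNNING (CC 1).
 */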
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock_bh(&li->lock);

	return rc;
}

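/*
 * Entry point for intercepted SIGP instructions: decode the order code,
 * CPU address and parameter register, dispatch to the order handler and
 * translate its result into a condition code. Orders that must be
 * completed in user space return -EOPNOTSUPP.
 */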
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}