/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
24
			u64 *reg)
25
{
26
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
27 28 29
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
30
		return SIGP_CC_NOT_OPERATIONAL;
31

32
	spin_lock(&fi->lock);
33
	if (fi->local_int[cpu_addr] == NULL)
34
		rc = SIGP_CC_NOT_OPERATIONAL;
35
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
36 37 38
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
39
		*reg &= 0xffffffff00000000UL;
40 41 42 43 44 45
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
46
		rc = SIGP_CC_STATUS_STORED;
47
	}
48
	spin_unlock(&fi->lock);
49 50 51 52 53 54 55

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

/*
 * Handle the SIGP EMERGENCY SIGNAL order: queue an emergency-signal
 * external interrupt (carrying the sender's vcpu_id) on the local
 * interrupt list of the CPU addressed by @cpu_addr and wake it up.
 */
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		/* target does not exist: free the unqueued interrupt */
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

/*
 * Handle the SIGP EXTERNAL CALL order: queue an external-call interrupt
 * (carrying the sender's vcpu_id) on the local interrupt list of the CPU
 * addressed by @cpu_addr and wake it up.
 */
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		/* target does not exist: free the unqueued interrupt */
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
131
{
132
	struct kvm_s390_interrupt_info *inti;
133
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
134

135
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
136 137 138 139 140
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
141 142
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
143 144
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
145
		goto out;
146
	}
147 148 149
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
150
	li->action_bits |= action;
151 152
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
153
out:
154
	spin_unlock_bh(&li->lock);
155

156
	return rc;
157 158 159 160 161 162 163 164 165
}

/*
 * Handle the SIGP STOP / STOP AND STORE STATUS orders: inject a stop
 * request with @action bits into the CPU addressed by @cpu_addr.  When
 * the target is already stopped and a store was requested (-ESHUTDOWN
 * from __inject_sigp_stop), the status is stored here, after the
 * spinlocks have been released.
 */
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

/*
 * Handle the SIGP SET ARCHITECTURE order.  Mode 0 is rejected as not
 * operational; modes 1 and 2 are accepted; anything else is passed to
 * userspace via -EOPNOTSUPP.
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
212
			     u64 *reg)
213
{
214
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
R
Roel Kluin 已提交
215
	struct kvm_s390_local_interrupt *li = NULL;
216
	struct kvm_s390_interrupt_info *inti;
217 218 219 220 221
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
222 223
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
224
		*reg &= 0xffffffff00000000UL;
225
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
226
		return SIGP_CC_STATUS_STORED;
227 228 229 230
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
231
		return SIGP_CC_BUSY;
232

233
	spin_lock(&fi->lock);
R
Roel Kluin 已提交
234 235
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];
236

R
Roel Kluin 已提交
237
	if (li == NULL) {
238 239
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
240
		rc = SIGP_CC_STATUS_STORED;
241 242 243 244 245 246
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
247
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
248 249
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
250
		rc = SIGP_CC_STATUS_STORED;
251 252 253 254 255 256 257 258 259
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
260 261
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
262
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
263 264 265 266 267

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
268
	spin_unlock(&fi->lock);
269 270 271
	return rc;
}

272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302
/*
 * Handle the SIGP STORE STATUS AT ADDRESS order: store the status of the
 * CPU addressed by @cpu_id at guest absolute address @addr.  The target
 * must exist and be stopped; otherwise, or when the address turns out to
 * be invalid (-EFAULT from the store), an error status is placed into the
 * low word of *@reg and SIGP_CC_STATUS_STORED is returned.
 */
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	/* snapshot the target's cpuflags under its local-interrupt lock */
	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	/* status is stored on a 512-byte boundary */
	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

303
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
304
				u64 *reg)
305 306 307 308 309
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
310
		return SIGP_CC_NOT_OPERATIONAL;
311 312 313

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
314
		rc = SIGP_CC_NOT_OPERATIONAL;
315 316 317 318
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
319
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
320 321 322
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
323
			*reg |= SIGP_STATUS_NOT_RUNNING;
324
			rc = SIGP_CC_STATUS_STORED;
325 326 327 328 329 330 331 332 333 334
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

335 336 337 338
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
339
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
340 341

	if (cpu_addr >= KVM_MAX_VCPUS)
342
		return SIGP_CC_NOT_OPERATIONAL;
343 344 345 346

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
347
		rc = SIGP_CC_NOT_OPERATIONAL;
348 349 350 351 352
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
353
		rc = SIGP_CC_BUSY;
354 355 356 357 358 359 360 361 362
	else
		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
			cpu_addr);
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

363 364 365 366 367
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
368
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
369 370 371
	u8 order_code;
	int rc;

372 373
	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
374
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
375

376
	order_code = kvm_s390_get_base_disp_rs(vcpu);
377 378

	if (r1 % 2)
379
		parameter = vcpu->run->s.regs.gprs[r1];
380
	else
381
		parameter = vcpu->run->s.regs.gprs[r1 + 1];
382

383
	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
384 385 386 387
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
388
				  &vcpu->run->s.regs.gprs[r1]);
389
		break;
390 391 392 393
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
394
	case SIGP_EMERGENCY_SIGNAL:
395 396 397 398 399
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
400
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
401
		break;
402
	case SIGP_STOP_AND_STORE_STATUS:
403
		vcpu->stat.instruction_sigp_stop++;
404 405
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
406
		break;
407 408 409 410
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
411
	case SIGP_SET_ARCHITECTURE:
412 413 414 415 416 417
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
418
				       &vcpu->run->s.regs.gprs[r1]);
419
		break;
420 421 422
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
423
					  &vcpu->run->s.regs.gprs[r1]);
424
		break;
425 426
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
427
		rc = __sigp_restart(vcpu, cpu_addr);
428
		if (rc == SIGP_CC_BUSY)
429
			break;
430 431
		/* user space must know about restart */
	default:
432
		return -EOPNOTSUPP;
433 434 435 436 437
	}

	if (rc < 0)
		return rc;

438
	kvm_s390_set_psw_cc(vcpu, rc);
439 440
	return 0;
}