/*
 * sigp.c - handling interprocessor communication
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include "gaccess.h"
#include "kvm-s390.h"

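/*
 * The __sigp_* helpers below return SIGP condition codes:
 *   0 - order accepted, 1 - status stored, 2 - busy, 3 - not operational.
 * Negative return values are host errors and are passed back to the
 * intercept handler instead of being reflected to the guest.
 */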
/* sigp order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_SENSE_RUNNING     0x15

/* cpu status bits */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_NOT_RUNNING       0x00000400UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL


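/*
 * SIGP SENSE: store the target CPU's status in the caller's result
 * register. Returns cc 3 if the target is not operational, otherwise
 * cc 1 with SIGP_STAT_STOPPED set when the target is stopped.
 */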
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
		  & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		rc = 1; /* status stored */
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_STOPPED;
		rc = 1; /* status stored */
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

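/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt,
 * tagged with the sender's CPU address, on the target's local
 * interrupt list and wake the target if it is waiting.
 */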
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

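/*
 * SIGP EXTERNAL CALL: like the emergency signal, queue an external
 * call interrupt carrying the sender's CPU address on the target CPU.
 */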
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

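/*
 * Queue a stop interrupt on the target CPU and record the requested
 * action (ACTION_STOP_ON_STOP or ACTION_STORE_ON_STOP) in its
 * action_bits; a target that is already stopped accepts the order
 * without queueing anything new.
 */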
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* already stopped: nothing to queue, don't leak inti */
		kfree(inti);
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
out:
	spin_unlock_bh(&li->lock);

	return 0; /* order accepted */
}

static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}

int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	return __inject_sigp_stop(li, action);
}

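/*
 * SIGP SET ARCHITECTURE: reject mode 0, accept modes 1 and 2 and
 * leave any other value to userspace via -EOPNOTSUPP.
 */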
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = 3; /* not operational */
		break;
	case 1:
	case 2:
		rc = 0; /* order accepted */
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

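/*
 * SIGP SET PREFIX: check that both pages of the new 8k prefix area
 * are backed by guest memory, then queue a set-prefix interrupt for
 * the target CPU, which must be in the stopped state.
 */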
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg |= SIGP_STAT_INVALID_PARAMETER;
		return 1; /* invalid parameter */
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return 2; /* busy */

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		rc = 1; /* status stored */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		rc = 1; /* status stored */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = 0; /* order accepted */

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}

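/*
 * SIGP SENSE RUNNING: cc 1 if the target CPU is currently running,
 * cc 0 with SIGP_STAT_NOT_RUNNING stored otherwise.
 */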
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
			rc = 1;
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
			*reg |= SIGP_STAT_NOT_RUNNING;
			rc = 0;
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

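/*
 * SIGP RESTART is completed in userspace; report busy while a stop
 * is still pending on the target, otherwise let the handler fall
 * through to -EOPNOTSUPP so userspace sees the exit.
 */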
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	int rc = 0;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = 2; /* busy */
	else
		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
			cpu_addr);
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

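/*
 * Entry point for intercepted SIGP instructions: decode the order code
 * from the second-operand address, the target CPU address from r3 and
 * the parameter from the odd register of the r1 pair, dispatch to the
 * helpers above and fold the resulting condition code back into the
 * guest PSW. Negative return values go back to the intercept handler.
 */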
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* SIGP issued in guest problem state gets a privileged-operation exception */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == 2) /* busy */
			break;
		/* user space must know about restart */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}