/*
 * priv.c - handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

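/*
 * Handle SET PREFIX (SPX): fetch the new prefix value from guest memory,
 * check word alignment and that both pages of the new prefix area are
 * backed by accessible memory, then update the vcpu's prefix register.
 */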
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
	return 0;
}

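/*
 * Handle STORE PREFIX (STPX): store the current prefix register at the
 * word-aligned operand address.
 */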
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
	return 0;
}

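/*
 * Handle STORE CPU ADDRESS (STAP): store the 16-bit vcpu id at the
 * halfword-aligned operand address.
 */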
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr);
	if (rc) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
	return 0;
}

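/*
 * Storage key instructions are not emulated here; rewind the PSW by one
 * instruction length (4 bytes) so the guest re-executes the operation.
 */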
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

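/*
 * Handle TEST PENDING INTERRUPTION (TPI): dequeue an I/O interrupt that
 * matches the isc mask in control register 6 and store the interruption
 * code either at the supplied address or, if that address is zero, into
 * the lowcore. The condition code reports whether an interrupt was found.
 */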
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
out:
	return 0;
}

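/*
 * Handle TEST SUBCHANNEL (TSCH): dequeue a pending I/O interrupt for the
 * subchannel designated by gpr 1, if any, then exit to userspace, which
 * performs the actual instruction emulation.
 */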
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

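/*
 * Dispatch the channel I/O instructions: with in-kernel css support only
 * TPI and the interrupt part of TSCH are handled here and the rest goes
 * to userspace; without css support, cc 3 tells the guest that channel
 * I/O is not available.
 */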
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

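/*
 * Handle STORE FACILITY LIST (STFL): copy the host facility bits into
 * the guest lowcore, masked down to the facilities that KVM can handle.
 */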
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
		trace_kvm_s390_handle_stfl(vcpu, facility_list);
	}
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

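/*
 * Handle LOAD PSW (LPSW): load an 8-byte ESA/390-format PSW from guest
 * memory, expand it into the 16-byte z/Architecture format and check it
 * for unassigned bits and invalid address-mode/address combinations.
 */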
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_compat_t new_psw;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	vcpu->arch.sie_block->gpsw.mask =
		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
	vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	handle_new_psw(vcpu);
	return 0;
}

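/*
 * Handle LOAD PSW EXTENDED (LPSWE): load a full 16-byte z/Architecture
 * PSW from guest memory and perform analogous validity checks to LPSW.
 */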
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_t new_psw;

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	      PSW_MASK_BA) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	handle_new_psw(vcpu);
	return 0;
}

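/*
 * Handle STORE CPU ID (STIDP): store the 8-byte CPU id at the
 * doubleword-aligned operand address.
 */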
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2);
	if (rc) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}

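/*
 * Build the STSI 3.2.2 response: count the initialized vcpus, keep any
 * data provided by a lower-level hypervisor by shifting it down one
 * entry, and describe this KVM instance in entry 0.
 */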
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

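/*
 * Handle STORE SYSTEM INFORMATION (STSI): function code 0 reports the
 * current configuration level, codes 1 and 2 are forwarded to the host's
 * stsi instruction, code 3 (selector 2.2) is synthesized above, and
 * anything else sets condition code 3.
 */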
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

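/*
 * Handle EXTRACT PSW (EPSW): store the upper half of the PSW mask into
 * r1 and, unless the r2 field of the instruction is zero, the lower half
 * into r2.
 */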
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

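/*
 * Handle TEST PROTECTION (TPROT) for the Linux memory-detection case
 * (access key 0, DAT off): translate the guest address and derive the
 * condition code from the permissions of the backing VMA.
 */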
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

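/*
 * Handle SET CLOCK PROGRAMMABLE FIELD (SCKPF): copy the low 16 bits of
 * gpr 0 into the vcpu's TOD programmable register after checking that
 * the adjacent 16 bits are zero.
 */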
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}