/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

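/*
 * Handler for SET PREFIX (SPX): read the new prefix value from the
 * word-aligned second operand, check that the designated 8k prefix
 * area (two consecutive pages) is accessible in guest absolute
 * storage, and switch the vcpu over to it.
 */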
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

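/*
 * Handler for STORE PREFIX (STPX): write the vcpu's current prefix
 * value to the word-aligned second-operand address.
 */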
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

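/*
 * Handler for STORE CPU ADDRESS (STAP): store the 16-bit cpu address
 * (here simply the vcpu id) at the halfword-aligned operand address.
 */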
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

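/*
 * Storage key instructions (ISKE, RRBE, SSKE) are not emulated here;
 * rewind the guest PSW by the instruction length so the operation is
 * simply retried.
 */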
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

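/*
 * Handler for TEST PENDING INTERRUPTION (TPI): dequeue a pending I/O
 * interrupt matching the isc mask in control register 6, store the
 * interruption code at the operand address (or, for an operand of
 * zero, into the lowcore) and report via the condition code whether
 * an interruption was pending.
 */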
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

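/*
 * Handler for TEST SUBCHANNEL (TSCH): dequeue a pending I/O interrupt
 * for the subchannel designated by gpr 1 and exit to userspace, which
 * performs the actual instruction.
 */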
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

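/*
 * Dispatch an intercepted channel I/O instruction: with in-kernel css
 * support only tpi and tsch are handled here and everything else is
 * forwarded to userspace; without it, condition code 3 tells the
 * guest that no channel subsystem is present.
 */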
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

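/*
 * Handler for STORE FACILITY LIST (STFL): copy a filtered version of
 * the host's facility bits into the guest's lowcore.
 */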
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits that we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

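/*
 * Check a PSW for architectural validity: no unassigned mask bits may
 * be set, and the instruction address must fit the addressing mode
 * (24-bit, 31-bit or 64-bit; mask bit EA without BA is invalid).
 */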
static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	return 1;
}

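/*
 * Handler for LOAD PSW (LPSW): fetch an ESA/390-format short PSW from
 * the doubleword-aligned operand and expand it into the 128-bit
 * z/Architecture PSW of the guest.
 */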
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

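/*
 * Handler for LOAD PSW EXTENDED (LPSWE): load a full 128-bit PSW from
 * the doubleword-aligned operand and validate it.
 */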
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

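/*
 * Handler for STORE CPU ID (STIDP): store the 8-byte CPU
 * identification at the doubleword-aligned operand address.
 */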
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

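/*
 * Build the STSI 3.2.2 block: if another level-3 hypervisor is
 * already present, shift its entries down and insert KVM as the
 * topmost hypervisor, reporting the number of configured vcpus.
 */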
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

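/*
 * Handler for STORE SYSTEM INFORMATION (STSI): the function code and
 * selectors come from gprs 0 and 1. Function code 0 just reports the
 * highest code supported, 1 and 2 are satisfied from the host's stsi
 * data, 3.2.2 is synthesized above, and everything else yields
 * condition code 3.
 */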
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_no_data;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

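/*
 * Handler for EXTRACT PSW (EPSW): copy the upper half of the guest
 * PSW mask into the low word of the first register and, if a second
 * register is specified, the lower half of the mask into that one.
 */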
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

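/*
 * Handler for TEST PROTECTION (TPROT): only the DAT-off, access key 0
 * case that Linux uses for memory detection is handled in the kernel;
 * the condition code is derived from the permissions of the backing
 * host vma.
 */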
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

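/*
 * Handler for SET CLOCK PROGRAMMABLE FIELD (SCKPF): take the TOD
 * programmable field from the low 16 bits of gpr 0 and put it into
 * the vcpu's TOD programmable register.
 */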
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}