/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

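/*
 * SET PREFIX (SPX): load a new prefix register value from the second
 * operand. The operand must be word aligned and the designated prefix
 * area (two consecutive pages) must be backed by accessible memory.
 */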
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

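/*
 * STORE PREFIX (STPX): store the current prefix register value at the
 * word-aligned second-operand address.
 */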
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

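/*
 * STORE CPU ADDRESS (STAP): store the 16-bit CPU address of this vcpu
 * at the halfword-aligned second-operand address.
 */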
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

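/*
 * The storage key instructions (iske, rrbe and sske, see the b2
 * dispatch table below) are intercepted here. Rewind the PSW by the
 * 4-byte instruction length so the guest simply retries the operation.
 */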
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

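/*
 * TEST PENDING INTERRUPTION (TPI): dequeue a pending I/O interrupt
 * matching the isc mask in control register 6. With a nonzero operand
 * address, the two-word interruption code is stored there; otherwise
 * the three-word code goes to the lowcore. cc 1 indicates that an
 * interrupt was pending, cc 0 that none was.
 */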
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	return 0;
}

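/*
 * TEST SUBCHANNEL (TSCH): dequeue a pending I/O interrupt for the
 * subchannel designated by gpr 1, if any, and exit to userspace
 * (-EREMOTE), which performs the actual instruction.
 */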
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

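/*
 * Common handler for the channel I/O instructions. With in-kernel
 * channel subsystem support, tpi and tsch are handled here and all
 * other I/O instructions are passed to userspace; without it, cc 3
 * stops the guest from using channel I/O altogether.
 */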
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
		return 0;
	}
}

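/*
 * STORE FACILITY LIST (STFL): store a filtered copy of the host's
 * facility bits into the guest's lowcore, advertising only the
 * facilities that KVM can handle.
 */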
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
	trace_kvm_s390_handle_stfl(vcpu, facility_list);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

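/*
 * Architectural validity checks for a newly loaded PSW: unassigned
 * mask bits must be zero, the instruction address must fit the
 * addressing mode (24 or 31 bit), and the EA bit without the BA bit
 * is invalid.
 */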
static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

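/*
 * LOAD PSW (LPSW): load an 8-byte ESA/390-format PSW from the
 * doubleword-aligned operand and expand it into the 16-byte
 * z/Architecture PSW of the guest.
 */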
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

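/*
 * LOAD PSW EXTENDED (LPSWE): load a full 16-byte PSW from the
 * doubleword-aligned operand.
 */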
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

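/*
 * STORE CPU ID (STIDP): store the 8-byte CPU identification at the
 * doubleword-aligned second-operand address.
 */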
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

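/*
 * Build the stsi 3.2.2 response: count the configured vcpus, shift any
 * entries reported by underlying hypervisors down one slot and describe
 * this KVM instance in slot 0.
 */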
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

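/*
 * STORE SYSTEM INFORMATION (STSI): function code 0 merely reports the
 * highest supported function code (3) in gpr 0. Function codes 1 and 2
 * are answered with the host's own stsi data, function code 3.2.2 is
 * synthesized, and the resulting page is copied to the guest-absolute
 * operand address.
 */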
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_no_data;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
out_exception:
	free_page(mem);
	return rc;
}

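/* Dispatch table for the b2xx opcodes, indexed by the low opcode byte. */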
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

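/*
 * EXTRACT PSW (EPSW): r1 and r2 are decoded from the ipb of the
 * intercepted RRE instruction; the upper half of the PSW mask goes
 * into gpr r1 and, if r2 is nonzero, the lower half into gpr r2.
 */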
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
	}
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if ((handler != handle_epsw) &&
		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* All eb instructions that end up here are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

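/*
 * TEST PROTECTION (TPROT): translate the first operand through the
 * guest mapping and derive the condition code from the backing vma:
 * cc 0 for a writable page, cc 1 for a read-only page, cc 2 for no
 * access.
 */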
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

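/*
 * SET CLOCK PROGRAMMABLE FIELD (SCKPF): bits 32-47 of gpr 0 must be
 * zero; the low 16 bits are loaded into the TOD programmable register.
 */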
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}