/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

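/*
 * SPX: replace the guest's prefix register with the word at the second
 * operand address, after checking that the new prefix area is backed
 * by accessible guest memory.
 */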
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

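/* STPX: store the guest's current prefix register at the operand address. */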
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

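/* STAP: store the CPU address (the vcpu id) at the operand address. */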
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

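/*
 * ISKE/RRBE/SSKE: storage key operations are not emulated here; rewind
 * the PSW by the instruction length (4 bytes) and let the guest retry
 * the storage key operation.
 */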
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

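/*
 * TPI: dequeue a pending I/O interrupt matching the subclass mask in
 * CR6 and store its interruption code at the operand address (two-word
 * format) or in the lowcore (three-word format); the condition code
 * tells the guest whether an interrupt was pending.
 */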
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4))) {
			/* don't leak the dequeued interrupt on the error path */
			kfree(inti);
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}

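/*
 * TSCH: dequeue a matching I/O interrupt, if any, copy its parameters
 * into the kvm_run block and exit to userspace, which performs the
 * actual subchannel handling.
 */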
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

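/*
 * Dispatcher for the channel I/O instructions: with in-kernel css
 * support, tpi and the interrupt part of tsch are handled here and
 * everything else is passed to userspace; without css support, cc 3
 * stops the guest from issuing channel I/O instructions.
 */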
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

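/*
 * STFL: store the first four bytes of the facility list visible to the
 * guest into the stfl field of the guest lowcore.
 */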
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   vfacilities, 4);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

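/*
 * A PSW is invalid if unassigned mask bits are set, if the extended
 * addressing (EA) bit is set without basic addressing (BA), or if the
 * instruction address does not fit the selected addressing mode.
 */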
static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	return 1;
}

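/*
 * LPSW: load an 8-byte ESA/390-format PSW from the operand address and
 * expand it into the 16-byte PSW format used by the SIE block.
 */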
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

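/* STIDP: store the 8-byte CPU id at the operand address. */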
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

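/*
 * Fill in the STSI 3.2.2 block: insert KVM as the top-level entry of
 * the virtual-machine list, pushing down any entries reported by an
 * underlying level-3 hypervisor, and report the number of vcpus.
 */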
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

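/*
 * STSI: store system information. Function codes 1 and 2 are forwarded
 * to the host's stsi instruction, function code 3 (selectors 2/2) is
 * post-processed by handle_stsi_3_2_2(); function code 0 just reports
 * the highest supported function code.
 */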
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

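/* EPSW: extract the two halves of the PSW mask into reg1 and, if set, reg2. */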
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

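/*
 * PFMF: perform frame management function. Only frame clearing and
 * storage key setting on 4K and 1M frames are handled; EDAT2 (2G
 * frames) and conditional-SSKE requests are rejected.
 */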
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0x9c] = handle_io_inst,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

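/*
 * LCTL: load the low halves of control registers reg1 through reg3
 * from consecutive words at the operand address.
 */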
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = handle_io_inst,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

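/*
 * TPROT: derive the condition code from the protection of the host
 * mapping backing the guest page; only the key-0, DAT-off case used
 * by Linux memory detection is handled here.
 */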
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

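/* SCKPF: set the TOD programmable field from the low 16 bits of gpr 0. */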
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}