/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

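/* Handle SPX (SET PREFIX) interception */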
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

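/* Handle STPX (STORE PREFIX) interception */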
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

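/* Handle STAP (STORE CPU ADDRESS) interception */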
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

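/* Lazily enable host storage key handling on first use of a key instruction */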
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}


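/* Handle ISKE, SSKE and RRBE (storage key instruction) interception */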
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

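/* Handle instructions that have to wait for the IPTE interlock before retrying */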
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

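/* Handle TB (TEST BLOCK) interception */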
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

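/* Handle TPI (TEST PENDING INTERRUPTION) interception */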
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int cc, rc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}

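/* Handle TSCH (TEST SUBCHANNEL) interception */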
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

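/* Handle STFL (STORE FACILITY LIST) interception */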
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	return 1;
}

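/* Handle LPSW (LOAD PSW) interception */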
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

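/* Handle LPSWE (LOAD PSW EXTENDED) interception */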
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

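/* Handle STIDP (STORE CPU ID) interception */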
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

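/* Handle STSI (STORE SYSTEM INFORMATION) interception */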
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

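/* Handle EPSW (EXTRACT PSW) interception */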
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

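/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */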
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

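/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception for CMMA */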
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
740 741 742
	if (handler)
		return handler(vcpu);

743 744 745
	return -EOPNOTSUPP;
}

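/* Handle LCTL (LOAD CONTROL) interception */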
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

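/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */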
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

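/* Handle TPROT (TEST PROTECTION) interception */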
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

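/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */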
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}