/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	u64 op2;
	int i;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (get_guest(vcpu, val, (u64 __user *) op2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
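	/*
	 * SET CLOCK only sets bits 0-57 of the TOD clock, so drop the low
	 * six bits when computing the new epoch (guest minus host TOD).
	 */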
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
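	/*
	 * Storage keys are handled lazily: once the guest uses a key
	 * instruction, enable keys for the host mm and stop intercepting
	 * ISKE/SSKE/RRBE so they can run under SIE.
	 */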
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}


static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw) {
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
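	/*
	 * Expand the 64-bit short PSW format into the extended format
	 * used in the SIE block.
	 */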
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
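	/* shift the existing entries down to make room for our own entry */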
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
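		/* fc 0 only queries the current configuration level: report level 3 (VM) */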
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
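	/* the frame-size code selects 4K or 1M (EDAT1) blocks */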
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
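	/*
	 * Walk the guest's release buffer and discard the host backing
	 * of every guest frame listed in it.
	 */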
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
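	/* load the low words of control registers reg1 through reg3 */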
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
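	/*
	 * Report the protection state in the condition code:
	 * 0 = fetch and store allowed, 1 = fetch only, 2 = no access.
	 */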
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}