/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	u64 op2;
	int i;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (get_guest(vcpu, val, (u64 __user *) op2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

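/* Handle SPX (SET PREFIX) interception */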
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

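/* Handle STPX (STORE PREFIX) interception */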
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

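/* Handle STAP (STORE CPU ADDRESS) interception */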
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

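/* Enable storage key handling and stop intercepting the key instructions */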
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

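/* Handle ISKE, RRBE, and SSKE by enabling skeys and retrying the instruction */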
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	__skey_check_enable(vcpu);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

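/* Handle TB (TEST BLOCK) interception */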
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

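/* Handle TPI (TEST PENDING INTERRUPTION) interception */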
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}

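/* Handle TSCH (TEST SUBCHANNEL) interception */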
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

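/* Handle the remaining I/O instruction interceptions */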
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

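/* Handle STFL (STORE FACILITY LIST) interception */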
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   vfacilities, 4);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

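/* Basic architectural validity checks for a guest PSW (mask and address) */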
static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	return 1;
}

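/* Handle LPSW (LOAD PSW) interception */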
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

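/* Handle LPSWE (LOAD PSW EXTENDED) interception */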
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

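/* Handle STIDP (STORE CPU ID) interception */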
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

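/* Insert this KVM guest into the list of hypervisors reported by STSI 3.2.2 */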
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

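/* Handle STSI (STORE SYSTEM INFORMATION) interception */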
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

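/* Handle EPSW (EXTRACT PSW) interception */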
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

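/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */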
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

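/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception */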
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

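/* Handle LCTL (LOAD CONTROL) interception */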
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

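/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */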
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

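/* Handle TPROT (TEST PROTECTION) interception */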
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

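/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */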
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}