/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	ar_t ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

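/*
 * Storage keys are enabled lazily: as long as the guest has not touched
 * them, ISKE/SSKE/RRBE intercept. On first use we enable key handling in
 * the host and clear the intercept bits, so later key instructions run
 * without exiting to the hypervisor.
 */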
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

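/*
 * We do not emulate the key instruction itself; after enabling keys we
 * rewind the PSW by the instruction length (4 bytes) so the guest simply
 * re-executes it with key handling now done by the hardware.
 */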
static int handle_skey(struct kvm_vcpu *vcpu)
{
	int rc = __skey_check_enable(vcpu);

	if (rc)
		return rc;
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

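/*
 * Block the vcpu until the IPTE lock is no longer held, then rewind the
 * PSW so the intercepted instruction is retried by the guest.
 */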
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

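/*
 * TEST PENDING INTERRUPTION: dequeue one pending I/O interrupt and store
 * its interruption code, either at the given address (two words) or into
 * the lowcore (three words). cc 1 reports an interrupt, cc 0 none pending.
 */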
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac->list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

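/*
 * Check architectural validity of a guest PSW: no unassigned mask bits,
 * an address that fits the selected addressing mode (24/31 bit), no
 * EA-without-BA combination and an even instruction address.
 */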
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

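/*
 * For STSI 3.2.2 we shift the existing hypervisor entries down by one and
 * insert KVM itself as the topmost level-3 hypervisor, reporting the
 * online vcpus of this VM.
 */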
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

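/* Handlers for the privileged B2xx instructions, indexed by the low opcode byte */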
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

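/*
 * Bit layout of general register r1 for PFMF (PERFORM FRAME MANAGEMENT
 * FUNCTION): frame size code, set-key/clear-frame controls, key value and
 * the non-quiescing/reference/change controls checked below.
 */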
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

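/*
 * ESSA (collaborative memory management): walk the CBRL entries the SIE
 * block collected, zap the backing host pages, then rewind the PSW so the
 * guest re-executes the instruction.
 */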
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	kvm_s390_rewind_psw(vcpu, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

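/*
 * LCTL/STCTL and their 64-bit variants address a register range r1..r3
 * that may wrap around from 15 to 0, hence the modulo-16 copy loops below.
 */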
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}