/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

35 36 37
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
38
	int rc;
39
	ar_t ar;
40
	u64 op2, val;
41 42 43 44

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

45
	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
46 47
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
48
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
49 50
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
51

52
	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
53
	kvm_s390_set_tod_clock(vcpu->kvm, val);
54 55 56 57 58

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

59 60 61
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
62 63
	u32 address;
	int rc;
64
	ar_t ar;
65 66 67

	vcpu->stat.instruction_spx++;

68 69 70
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

71
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
72 73

	/* must be word boundary */
74 75
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
76 77

	/* get the value */
78
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
79 80 81 82 83 84 85 86 87 88 89
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
90
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
91

92
	kvm_s390_set_prefix(vcpu, address);
93
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
94 95 96 97 98 99 100
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
101
	int rc;
102
	ar_t ar;
103 104

	vcpu->stat.instruction_stpx++;
105

106 107 108
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

109
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
110 111

	/* must be word boundary */
112 113
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
114

115
	address = kvm_s390_get_prefix(vcpu);
116 117

	/* get the value */
118
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
119 120
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
121

122
	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
123
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
124 125 126 127 128
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
129 130 131
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
132
	ar_t ar;
133 134

	vcpu->stat.instruction_stap++;
135

136 137 138
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

139
	ga = kvm_s390_get_base_disp_s(vcpu, &ar);
140

141
	if (ga & 1)
142
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
143

144
	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
145 146
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
147

148
	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
149
	trace_kvm_s390_handle_stap(vcpu, ga);
150 151 152
	return 0;
}

153
static int __skey_check_enable(struct kvm_vcpu *vcpu)
154
{
155
	int rc = 0;
156 157

	trace_kvm_s390_skey_related_inst(vcpu);
158
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
159
		return rc;
160

161
	rc = s390_enable_skey();
162 163 164
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
165
	return rc;
166 167
}

168
static int try_handle_skey(struct kvm_vcpu *vcpu)
169
{
170
	int rc;
171

172 173
	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
174 175
	if (rc)
		return rc;
176 177 178 179 180 181
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
182 183
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
184 185
	return 0;
}
186

187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315
/*
 * Handle ISKE (INSERT STORAGE KEY EXTENDED): read the storage key of the
 * frame designated by reg2 and place it in the low byte of reg1.
 */
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;	/* -EAGAIN: SIE retries the insn */

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* logical -> effective -> absolute -> host virtual address */
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/* insert the key into bits 56-63 of reg1, leave the rest untouched */
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

/*
 * Handle RRBE (RESET REFERENCE BIT EXTENDED): clear the reference bit of
 * the frame designated by reg2 and report its old state via the cc.
 */
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;	/* -EAGAIN: SIE retries the insn */

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* logical -> effective -> absolute -> host virtual address */
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* rc >= 0 is the condition code reported to the guest */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
/*
 * Handle SSKE (SET STORAGE KEY EXTENDED). With the multiple-block (MB)
 * option one invocation processes pages up to the next 1M boundary and
 * reg2 is advanced accordingly; otherwise a single page is handled.
 *
 * Fix vs. original: removed the stray ';' (empty statement) that followed
 * the while loop's closing brace.
 */
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;	/* -EAGAIN: SIE retries the insn */

	/* mask out m3 bits of facilities the guest does not have */
	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		/* advance reg2 to the first unprocessed address */
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

319 320 321
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
322
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
323 324
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
325
	kvm_s390_retry_instr(vcpu);
326 327 328 329
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

T
Thomas Huth 已提交
330 331 332 333 334 335 336 337 338 339
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
340
	addr = kvm_s390_logical_to_effective(vcpu, addr);
341
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
342
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
T
Thomas Huth 已提交
343 344
	addr = kvm_s390_real_to_abs(vcpu, addr);

345
	if (kvm_is_error_gpa(vcpu->kvm, addr))
T
Thomas Huth 已提交
346 347 348 349 350
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
351
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
T
Thomas Huth 已提交
352 353 354 355 356 357
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

358
static int handle_tpi(struct kvm_vcpu *vcpu)
359
{
360
	struct kvm_s390_interrupt_info *inti;
H
Heiko Carstens 已提交
361 362
	unsigned long len;
	u32 tpi_data[3];
363
	int rc;
364
	u64 addr;
365
	ar_t ar;
366

367
	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
368 369
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
370

371
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
372 373 374 375 376
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

H
Heiko Carstens 已提交
377 378 379
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
380 381 382 383 384
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
H
Heiko Carstens 已提交
385
		len = sizeof(tpi_data) - 4;
386
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
387 388 389 390
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
391 392 393 394 395
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
H
Heiko Carstens 已提交
396
		len = sizeof(tpi_data);
397 398
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
H
Heiko Carstens 已提交
399
			rc = -EFAULT;
400 401
			goto reinject_interrupt;
		}
402
	}
403 404 405 406 407 408

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
409 410 411 412 413
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
414 415 416 417
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
418
	/* don't set the cc, a pgm irq was injected or we drop to user space */
H
Heiko Carstens 已提交
419
	return rc ? -EFAULT : 0;
420 421
}

422 423
static int handle_tsch(struct kvm_vcpu *vcpu)
{
424 425
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
426

427 428 429 430
	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);
431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

457 458 459
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

460 461 462 463 464 465 466 467 468 469 470 471 472
	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
473
		 * Set condition code 3 to stop the guest from issuing channel
474 475
		 * I/O instructions.
		 */
476
		kvm_s390_set_psw_cc(vcpu, 3);
477 478 479 480
		return 0;
	}
}

481 482 483
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
484
	unsigned int fac;
485 486

	vcpu->stat.instruction_stfl++;
487 488 489 490

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

491 492 493 494
	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
495
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
496
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
497
			    &fac, sizeof(fac));
498
	if (rc)
499
		return rc;
500
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
501
	trace_kvm_s390_handle_stfl(vcpu, fac);
502 503 504
	return 0;
}

505 506
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
507
#define PSW_ADDR_24 0x0000000000ffffffUL
508 509
#define PSW_ADDR_31 0x000000007fffffffUL

T
Thomas Huth 已提交
510 511
int is_valid_psw(psw_t *psw)
{
512 513 514 515 516 517 518 519 520 521
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
T
Thomas Huth 已提交
522 523
	if (psw->addr & 1)
		return 0;
524 525 526
	return 1;
}

527 528
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
529
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
530
	psw_compat_t new_psw;
531
	u64 addr;
532
	int rc;
533
	ar_t ar;
534

535
	if (gpsw->mask & PSW_MASK_PSTATE)
536 537
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

538
	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
539 540
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
541

542
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
543 544
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
545 546
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
547 548 549 550
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
551
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
552 553 554 555 556 557
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
558
	u64 addr;
559
	int rc;
560
	ar_t ar;
561

562 563 564
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

565
	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
566 567
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
568
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
569 570
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
571 572
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
573
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
574 575 576
	return 0;
}

577 578
static int handle_stidp(struct kvm_vcpu *vcpu)
{
579
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
580
	u64 operand2;
581
	int rc;
582
	ar_t ar;
583 584

	vcpu->stat.instruction_stidp++;
585

586 587 588
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

589
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
590

591 592
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
593

594
	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
595 596
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
597

598
	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
599 600 601 602 603 604 605 606
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

607
	cpus = atomic_read(&vcpu->kvm->online_vcpus);
608 609

	/* deal with other level 3 hypervisors */
610
	if (stsi(mem, 3, 2, 2))
611 612 613 614 615 616
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

617
	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
618 619 620 621 622 623 624 625 626 627 628
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

629 630 631 632 633 634 635 636 637 638 639
/*
 * Fill the kvm_run STSI exit fields so userspace can post-process the
 * instruction (used when user_stsi is enabled — see handle_stsi).
 */
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

640 641
static int handle_stsi(struct kvm_vcpu *vcpu)
{
642 643 644
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
645
	unsigned long mem = 0;
646
	u64 operand2;
647
	int rc = 0;
648
	ar_t ar;
649 650

	vcpu->stat.instruction_stsi++;
651
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
652

653 654 655
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

656
	if (fc > 3) {
657
		kvm_s390_set_psw_cc(vcpu, 3);
658 659
		return 0;
	}
660

661 662
	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
663 664
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

665
	if (fc == 0) {
666
		vcpu->run->s.regs.gprs[0] = 3 << 28;
667
		kvm_s390_set_psw_cc(vcpu, 0);
668
		return 0;
669 670
	}

671
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
672 673 674 675 676

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
677 678 679 680
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
681
			goto out_no_data;
682
		if (stsi((void *) mem, fc, sel1, sel2))
683
			goto out_no_data;
684 685 686
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
687
			goto out_no_data;
688 689
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
690
			goto out_no_data;
691 692 693 694
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

695
	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
696 697 698
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
699
	}
700 701 702 703
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
704
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
705
	free_page(mem);
706
	kvm_s390_set_psw_cc(vcpu, 0);
707
	vcpu->run->s.regs.gprs[0] = 0;
708
	return rc;
709
out_no_data:
710
	kvm_s390_set_psw_cc(vcpu, 3);
711
out:
712
	free_page(mem);
713
	return rc;
714 715
}

716
static const intercept_handler_t b2_handlers[256] = {
717
	[0x02] = handle_stidp,
718
	[0x04] = handle_set_clock,
719 720 721
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
722
	[0x21] = handle_ipte_interlock,
723 724 725
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
T
Thomas Huth 已提交
726
	[0x2c] = handle_test_block,
727 728 729 730 731 732 733 734 735 736 737 738 739
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
740
	[0x50] = handle_ipte_interlock,
741 742 743
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
744 745
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
746
	[0xb2] = handle_lpswe,
747 748
};

749
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
750 751 752
{
	intercept_handler_t handler;

753
	/*
754 755 756 757
	 * A lot of B2 instructions are priviledged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
758
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
759 760 761
	if (handler)
		return handler(vcpu);

762
	return -EOPNOTSUPP;
763
}
764

765 766 767 768
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

769
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
770 771

	/* This basically extracts the mask half of the psw. */
772
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
773 774
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
775
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
776
		vcpu->run->s.regs.gprs[reg2] |=
777
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
778 779 780 781
	}
	return 0;
}

782 783 784 785 786 787 788 789 790 791 792 793
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
794
	bool mr = false, mc = false, nq;
795 796
	int reg1, reg2;
	unsigned long start, end;
797
	unsigned char key;
798 799 800 801 802

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

803
	if (!test_kvm_facility(vcpu->kvm, 8))
804 805 806
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
807
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
808 809 810 811

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

812 813 814
	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
815 816
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

817 818 819 820 821 822 823 824 825
	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
826
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
827
	start = kvm_s390_logical_to_effective(vcpu, start);
T
Thomas Huth 已提交
828

829 830 831 832 833
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

834 835
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
836 837
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
838 839 840 841 842 843
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
844 845 846 847 848
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
849
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
850
		break;
851 852 853
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
854

855
	while (start != end) {
856
		unsigned long useraddr;
T
Thomas Huth 已提交
857 858

		/* Translate guest address to host address */
859
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
T
Thomas Huth 已提交
860
		if (kvm_is_error_hva(useraddr))
861 862 863 864 865 866 867 868
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
869 870 871 872
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
873
			down_read(&current->mm->mmap_sem);
874 875
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
876
			up_read(&current->mm->mmap_sem);
877
			if (rc < 0)
878 879 880 881 882
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
883 884 885 886 887 888 889 890 891
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
892 893 894
	return 0;
}

895 896 897 898
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
899
	unsigned long *cbrlo;
900 901 902
	struct gmap *gmap;
	int i;

903
	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
904 905
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
906
	if (!vcpu->kvm->arch.use_cmma)
907 908 909 910 911 912 913 914
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

915 916
	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
917 918 919
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
920 921
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
922 923 924 925
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

926
static const intercept_handler_t b9_handlers[256] = {
927
	[0x8a] = handle_ipte_interlock,
928
	[0x8d] = handle_epsw,
929 930
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
931
	[0xab] = handle_essa,
932
	[0xaf] = handle_pfmf,
933 934 935 936 937 938 939 940
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
941 942 943
	if (handler)
		return handler(vcpu);

944 945 946
	return -EOPNOTSUPP;
}

947 948 949 950
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
951 952
	int reg, rc, nr_regs;
	u32 ctl_array[16];
953
	u64 ga;
954
	ar_t ar;
955 956 957 958 959 960

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

961
	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
962

963
	if (ga & 3)
964 965
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

966
	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
967
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
968

969
	nr_regs = ((reg3 - reg1) & 0xf) + 1;
970
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
971 972
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
973
	reg = reg1;
974
	nr_regs = 0;
975 976
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
977
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
978 979 980 981
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
982
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
983 984 985
	return 0;
}

986 987 988 989
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
990 991
	int reg, rc, nr_regs;
	u32 ctl_array[16];
992
	u64 ga;
993
	ar_t ar;
994 995 996 997 998 999

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1000
	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
1001 1002 1003 1004

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1005
	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1006 1007 1008
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
1009
	nr_regs = 0;
1010
	do {
1011
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1012 1013 1014 1015
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1016
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
1017
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1018 1019
}

1020 1021 1022 1023
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1024 1025 1026
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
1027
	ar_t ar;
1028 1029 1030 1031 1032 1033

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1034
	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1035

1036
	if (ga & 7)
1037 1038
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1039
	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1040
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
1041

1042
	nr_regs = ((reg3 - reg1) & 0xf) + 1;
1043
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1044 1045 1046 1047
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
1048
	do {
1049
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
1050 1051 1052 1053
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1054
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1055 1056 1057
	return 0;
}

1058 1059 1060 1061
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1062 1063 1064
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
1065
	ar_t ar;
1066 1067 1068 1069 1070 1071

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1072
	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1073 1074 1075 1076

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1077
	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1078 1079
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

1080 1081
	reg = reg1;
	nr_regs = 0;
1082
	do {
1083
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1084 1085 1086 1087
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1088
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1089
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1090 1091
}

1092
/* Handlers for intercepted EB-format instructions, indexed by low IPB byte */
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
1098 1099 1100 1101 1102 1103 1104 1105 1106
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

1107 1108
static int handle_tprot(struct kvm_vcpu *vcpu)
{
1109
	u64 address1, address2;
1110 1111 1112
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
1113
	ar_t ar;
1114 1115 1116

	vcpu->stat.instruction_tprot++;

1117 1118 1119
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1120
	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
1121

1122 1123 1124 1125 1126 1127
	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
1128
		ipte_lock(vcpu);
1129
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
1130 1131 1132
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
1133 1134
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}
1146

1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
1160 1161 1162 1163 1164 1165 1166 1167 1168 1169
}

/* Dispatch an intercepted E5-format instruction; only TPROT is emulated */
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1175
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

1187
static const intercept_handler_t x01_handlers[256] = {
1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199
	[0x07] = handle_sckpf,
};

/* Dispatch an intercepted 01xx instruction to its handler, if any */
int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];

	return handler ? handler(vcpu) : -EOPNOTSUPP;
}