priv.c 32.4 KB
Newer Older
1
/*
2
 * handling privileged instructions
3
 *
4
 * Copyright IBM Corp. 2008, 2013
5 6 7 8 9 10 11 12 13 14
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
15
#include <linux/gfp.h>
16
#include <linux/errno.h>
17
#include <linux/compat.h>
18 19
#include <linux/mm_types.h>

20
#include <asm/asm-offsets.h>
21
#include <asm/facility.h>
22 23 24 25
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
26 27
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
28
#include <asm/gmap.h>
29
#include <asm/io.h>
30 31
#include <asm/ptrace.h>
#include <asm/compat.h>
32
#include <asm/sclp.h>
33 34
#include "gaccess.h"
#include "kvm-s390.h"
35
#include "trace.h"
36

F
Fan Zhang 已提交
37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54
/*
 * Enable interpretation of the runtime-instrumentation facility for the
 * guest: requires facility 64; otherwise an operation exception is injected.
 */
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 64))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
	/* let SIE interpret RI instructions and re-execute the current one */
	vcpu->arch.sie_block->ecb3 |= 0x01;
	kvm_s390_retry_instr(vcpu);
	return 0;
}

/* Dispatch the 0xaa instruction group; only the RI subset is handled here. */
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) > 4)
		return -EOPNOTSUPP;
	return handle_ri(vcpu);
}

55 56 57
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	/* SCK is privileged: problem state gets a privileged-operation pgm */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* fetch the new TOD value from guest memory */
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	/* cc 0: clock value was set */
	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

79 80 81
/* Handle SPX (SET PREFIX): load a new prefix register value from storage. */
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	/* SPX is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	/* keep only the prefix bits (8k-aligned, below 2G) */
	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

/* Handle STPX (STORE PREFIX): store the current prefix value to storage. */
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	/* STPX is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the current prefix to the operand location */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
149 150 151
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
152
	u8 ar;
153 154

	vcpu->stat.instruction_stap++;
155

156 157 158
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

159
	ga = kvm_s390_get_base_disp_s(vcpu, &ar);
160

161
	if (ga & 1)
162
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
163

164
	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
165 166
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
167

168
	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
169
	trace_kvm_s390_handle_stap(vcpu, ga);
170 171 172
	return 0;
}

173
/*
 * Lazily enable storage-key handling: call s390_enable_skey() and, on
 * success, clear the ISKE/SSKE/RRBE intercept controls so those
 * instructions no longer intercept. Returns 0 on success or if keys are
 * already enabled, otherwise the error from s390_enable_skey().
 */
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* intercept controls already cleared: nothing to enable */
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

188
/*
 * Common prologue for the storage-key instructions (ISKE/SSKE/RRBE).
 * Returns -EAGAIN when the instruction was handed back to SIE for
 * interpretation (caller must treat this as success), a program
 * interception result on privilege violation, or 0 when the caller
 * should emulate the instruction itself.
 */
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	/* manual emulation path: enforce the privilege check here */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}
206

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315
/* Handle ISKE (INSERT STORAGE KEY EXTENDED). */
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	unsigned char skey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc == -EAGAIN ? 0 : rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* translate: logical -> effective -> absolute -> host virtual */
	hva = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	hva = kvm_s390_logical_to_effective(vcpu, hva);
	hva = kvm_s390_real_to_abs(vcpu, hva);
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(hva));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, hva, &skey);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* place the key in the low byte of reg1 */
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= skey;
	return 0;
}

/* Handle RRBE (RESET REFERENCE BIT EXTENDED). */
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc == -EAGAIN ? 0 : rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* translate: logical -> effective -> absolute -> host virtual */
	hva = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	hva = kvm_s390_logical_to_effective(vcpu, hva);
	hva = kvm_s390_real_to_abs(vcpu, hva);
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(hva));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, hva);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* non-negative helper result is used directly as the condition code */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8	/* nonquiescing */
#define SSKE_MR 0x4	/* reference-bit update mask */
#define SSKE_MC 0x2	/* change-bit update mask */
#define SSKE_MB 0x1	/* multiple-block control */
/*
 * Handle SSKE (SET STORAGE KEY EXTENDED), including the conditional and
 * multiple-block variants when the corresponding facilities are enabled
 * for the guest.
 */
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	/* mask off m3 bits whose facilities are not enabled for the guest */
	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	/* set the key on each 4k page of the designated range */
	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	/* conditional variant: report outcome via cc and the old key */
	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	/* multiple-block variant: advance reg2 past the processed block */
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

339 340 341
/*
 * Block the vcpu until the IPTE lock is released, then retry the
 * intercepted instruction.
 */
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	/* only supervisor state may issue these instructions */
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

T
Thomas Huth 已提交
350 351 352 353 354 355 356 357 358 359
/* Handle TEST BLOCK: clear the designated 4k block and report cc 0. */
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	/* privileged instruction */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	/* low-address protection applies to the real address */
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

378
/*
 * Handle TPI (TEST PENDING INTERRUPTION): dequeue a pending I/O
 * interrupt and store its interruption code either at the operand
 * address (two words) or in the lowcore (three words). On store failure
 * the interrupt is reinjected so it is not lost.
 */
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* cc 0: no interruption was pending */
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

442 443
/*
 * Handle the interrupt portion of TSCH (TEST SUBCHANNEL): dequeue any
 * matching pending I/O interrupt and exit to userspace, which completes
 * the instruction.
 */
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

477 478 479
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

480 481 482 483 484 485 486 487 488 489 490 491 492
	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
493
		 * Set condition code 3 to stop the guest from issuing channel
494 495
		 * I/O instructions.
		 */
496
		kvm_s390_set_psw_cc(vcpu, 3);
497 498 499 500
		return 0;
	}
}

501 502 503
/* Handle STFL (STORE FACILITY LIST): store facility bits 0-31 to lowcore. */
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	/* STFL is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

525 526
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

/*
 * Check whether a PSW is architecturally valid: no unassigned mask bits
 * set, the instruction address fits the addressing mode, the mode itself
 * is valid (EA without BA is not), and the address is even.
 * Returns 1 if valid, 0 otherwise.
 */
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		/* 31-bit mode: address must fit in 31 bits */
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	/* 24-bit mode (neither EA nor BA): address must fit in 24 bits */
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	/* EA set without BA is an invalid addressing mode */
	if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
		return 0;
	/* instruction addresses are halfword aligned */
	if (psw->addr & 1)
		return 0;
	return 1;
}

547 548
/* Handle LPSW (LOAD PSW): load a 64-bit (compat-format) PSW from storage. */
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	/* LPSW is privileged */
	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* the compat-format base bit must be one */
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* expand the 64-bit compat PSW into the 128-bit guest PSW */
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

/* Handle LPSWE (LOAD PSW EXTENDED): load a full 128-bit PSW from storage. */
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	/* LPSWE is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

597 598
static int handle_stidp(struct kvm_vcpu *vcpu)
{
599
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
600
	u64 operand2;
601
	int rc;
602
	u8 ar;
603 604

	vcpu->stat.instruction_stidp++;
605

606 607 608
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

609
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
610

611 612
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
613

614
	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
615 616
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
617

618
	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
619 620 621 622 623 624 625 626
	return 0;
}

/*
 * Build the STSI 3.2.2 (VM-level sysinfo) response: shift any data from
 * lower-level hypervisors down one slot and insert our own "KVMguest"
 * entry at index 0.
 */
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	/* shift existing entries down to make room for our own at [0] */
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

649
/* Fill the kvm_run block so userspace can observe this STSI invocation. */
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	struct kvm_run *run = vcpu->run;

	run->exit_reason = KVM_EXIT_S390_STSI;
	run->s390_stsi.addr = addr;
	run->s390_stsi.ar = ar;
	run->s390_stsi.fc = fc;
	run->s390_stsi.sel1 = sel1;
	run->s390_stsi.sel2 = sel2;
}

660 661
/*
 * Handle STSI (STORE SYSTEM INFORMATION): emulate function codes 0-3,
 * optionally notifying userspace (user_stsi), and report cc 3 for
 * anything we cannot provide.
 */
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	/* STSI is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* unsupported function code: cc 3, no data stored */
	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	/* reserved register bits must be zero */
	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* fc 0: report the current (highest) function code in gpr 0 */
	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* operand must be page aligned */
	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	/* let userspace see the request too, if it asked for that */
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	/* cc 3: requested information not available */
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

736
/* Dispatch table for 0xb2xx instructions, indexed by the low opcode byte. */
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

770
/* Dispatch an intercepted 0xb2xx instruction via b2_handlers. */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
785

786 787 788 789
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

790
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
791 792

	/* This basically extracts the mask half of the psw. */
793
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
794 795
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
796
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
797
		vcpu->run->s.regs.gprs[reg2] |=
798
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
799 800 801 802
	}
	return 0;
}

803 804 805 806 807 808 809 810 811 812 813 814
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL	/* set key control */
#define PFMF_CF         0x00010000UL	/* clear frame control */
#define PFMF_UI         0x00008000UL	/* usage indication */
#define PFMF_FSC        0x00007000UL	/* frame-size code */
#define PFMF_NQ         0x00000800UL	/* nonquiescing */
#define PFMF_MR         0x00000400UL	/* reference-bit update mask */
#define PFMF_MC         0x00000200UL	/* change-bit update mask */
#define PFMF_KEY        0x000000feUL	/* storage key */

/*
 * Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION): clear frames and/or
 * set storage keys for a 4k/1M/2G frame, page by page.
 */
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* PFMF requires the EDAT1 facility (facility 8) */
	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	/* determine the frame end from the frame-size code */
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	/* update reg2 with the address past the processed frame */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

916 917 918 919
/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES): zap the pages listed
 * in the CBRL buffer and hand the instruction back to SIE for retry.
 */
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	/* ESSA is only valid when CMMA is in use */
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* orc (operation request code, ipb bits 32-35) must be <= 6 */
	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

947
/* Dispatch table for 0xb9xx instructions, indexed by the low opcode byte. */
static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
962 963 964
	if (handler)
		return handler(vcpu);

965 966 967
	return -EOPNOTSUPP;
}

968 969 970 971
/*
 * Handle LCTL (LOAD CONTROL): load the low 32 bits of control registers
 * reg1..reg3 (wrapping) from guest storage, then flush the TLB.
 */
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	/* LCTL is privileged */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	/* operand must be word aligned */
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	/* read all registers at once, then distribute them */
	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		/* only the low word of each control register is replaced */
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

1007 1008 1009 1010
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1011 1012
	int reg, rc, nr_regs;
	u32 ctl_array[16];
1013
	u64 ga;
1014
	u8 ar;
1015 1016 1017 1018 1019 1020

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1021
	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
1022 1023 1024 1025

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1026
	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1027 1028 1029
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
1030
	nr_regs = 0;
1031
	do {
1032
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1033 1034 1035 1036
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1037
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
1038
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1039 1040
}

1041 1042 1043 1044
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1045 1046 1047
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
1048
	u8 ar;
1049 1050 1051 1052 1053 1054

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1055
	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1056

1057
	if (ga & 7)
1058 1059
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1060
	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1061
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
1062

1063
	nr_regs = ((reg3 - reg1) & 0xf) + 1;
1064
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1065 1066 1067 1068
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
1069
	do {
1070
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
1071 1072 1073 1074
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1075
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1076 1077 1078
	return 0;
}

1079 1080 1081 1082
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1083 1084 1085
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
1086
	u8 ar;
1087 1088 1089 1090 1091 1092

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

1093
	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1094 1095 1096 1097

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

1098
	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1099 1100
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

1101 1102
	reg = reg1;
	nr_regs = 0;
1103
	do {
1104
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1105 1106 1107 1108
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
1109
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1110
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1111 1112
}

1113
/*
 * Dispatch table for intercepted EB-format instructions, indexed by the
 * low byte of IPB. Unlisted opcodes are NULL and fall through to
 * -EOPNOTSUPP in kvm_s390_handle_eb().
 */
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

1121
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
1122 1123 1124 1125 1126 1127 1128 1129 1130
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

1131 1132
/*
 * Handle TPROT (TEST PROTECTION) for the access-key-0 case only.
 *
 * Translates the first operand address and sets the condition code:
 *   CC0 - fetch and store permitted
 *   CC1 - fetch permitted, store not permitted (read-only)
 *   CC3 - translation not available
 * Any request with a nonzero access key (second operand bits) is passed
 * to userspace via -EOPNOTSUPP; this covers the Linux guest memory
 * detection case. Returns 0 on success or a negative/injection result.
 */
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	/* Privileged operation: reject in problem state */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	/* Hold the ipte lock while walking guest tables under DAT */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	/* First try a store (write) access... */
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			/* These faults are forwarded to the guest */
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	/* Check host-side mapping/writability of the translated frame */
	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

/*
 * Dispatch an intercepted E5xx instruction.
 * Only TPROT (opcode byte 0x01) is emulated; anything else goes to
 * userspace via -EOPNOTSUPP.
 */
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	int op = vcpu->arch.sie_block->ipa & 0x00ff;

	/* For e5xx... instructions we only handle TPROT */
	return (op == 0x01) ? handle_tprot(vcpu) : -EOPNOTSUPP;
}

1194 1195 1196 1197 1198
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1199
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

1211 1212 1213 1214 1215 1216 1217
/*
 * Handle PTFF (PERFORM TIMING FACILITY FUNCTION).
 * No PTFF control functions are emulated; report CC3 (function not
 * available) to the guest and treat the instruction as handled.
 */
static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

1218
/*
 * Dispatch table for intercepted 01xx instructions, indexed by the low
 * byte of IPA. Unlisted opcodes are NULL and fall through to
 * -EOPNOTSUPP in kvm_s390_handle_01().
 */
static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}