/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

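/* Lazily enable runtime instrumentation and retry the intercepted instruction */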
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

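/* Lazily enable guarded storage and retry the intercepted instruction */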
static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

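/* Handle SPX (SET PREFIX) interception */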
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

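/* Handle STPX (STORE PREFIX) interception */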
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

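/* Handle STAP (STORE CPU ADDRESS) interception */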
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

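/*
 * Enable storage-key handling for the guest: once s390_enable_skey()
 * succeeds, stop intercepting ISKE/SSKE/RRBE (or leave keyless-subset
 * mode) so that SIE can deal with these instructions.
 */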
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

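/*
 * Common checks for the storage-key instructions; returns -EAGAIN if the
 * instruction was rearmed so that SIE handles it on retry.
 */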
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}

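/* Handle ISKE (INSERT STORAGE KEY EXTENDED) interception */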
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

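/* Handle RRBE (RESET REFERENCE BIT EXTENDED) interception */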
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

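/* Handle SSKE (SET STORAGE KEY EXTENDED) interception */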
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

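/* Wait until the IPTE lock is free again, then retry the instruction */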
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

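/* Handle TEST BLOCK interception */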
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

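/* Handle TPI (TEST PENDING INTERRUPTION) interception */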
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

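/* Handle TSCH (TEST SUBCHANNEL) interception */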
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

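/*
 * Handle the channel-I/O instructions: with in-kernel css support only
 * TPI and TSCH are handled here and everything else goes to userspace;
 * without it, condition code 3 is set.
 */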
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

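/* Handle STFL (STORE FACILITY LIST) interception */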
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

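/* Returns 1 if the given PSW is architecturally valid, 0 otherwise */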
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

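/* Handle LPSW (LOAD PSW) interception */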
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

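/* Handle LPSWE (LOAD PSW EXTENDED) interception */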
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

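/* Handle STIDP (STORE CPU ID) interception */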
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

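/* Insert the KVM hypervisor into the VM list reported by STSI 3.2.2 */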
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

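/* Handle STSI (STORE SYSTEM INFORMATION) interception */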
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

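/* Handle EPSW (EXTRACT PSW) interception */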
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

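/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */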
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

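/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) for CMMA */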
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

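/* Handle LCTL (LOAD CONTROL) interception */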
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

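/* Handle STCTL (STORE CONTROL) interception */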
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

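/* Handle LCTLG (LOAD CONTROL, 64-bit control registers) interception */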
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

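/* Handle STCTG (STORE CONTROL, 64-bit control registers) interception */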
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

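/* Handle TPROT (TEST PROTECTION) interception */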
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

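/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */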
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

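/* Handle PTFF (PERFORM TIMING FACILITY FUNCTION) interception */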
static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}