/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
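/*
 * Upper bound for the KVM_S390_*_IRQ_STATE buffer: a vcpu can have one
 * pending emergency signal per possible vcpu in the VM, plus its
 * remaining local interrupt types.
 */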
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
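	/* arch.epoch holds the guest TOD offset relative to the host TOD clock */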
	kvm->arch.epoch = gtod - host_tod;
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

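	/* try a physically contiguous allocation first, fall back to vmalloc */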
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
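	/* PQAP(QCI): function code in GR0, result buffer address in GR2 */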
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d\n", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
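	/*
	 * Cycle sca_offset across VM creations so that the SCAs of different
	 * guests start at different offsets within their pages and do not
	 * all share the same cache lines.
	 */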
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
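		/* (1UL << 44) - 1: standard guests get a 16 TB address space */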
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
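		/* the SCA cpu mask (mcn) uses MSB-0 bit numbering, hence 63 - id */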
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

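	/* ecb2: set 0x80 (CMMA interpretation), clear 0x08 (PFMF interpretation) */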
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb   = 6;
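	/* facilities 50 and 73 together allow the guest transactional execution */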
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
	vcpu->arch.host_vregs = &sie_page->vregs;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

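/* setting PROG_BLOCK_SIE keeps the vcpu from (re-)entering SIE */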
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

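	/*
	 * A PFAULT_INIT interrupt is delivered to the vcpu itself; the
	 * matching PFAULT_DONE is injected into the VM as a floating irq.
	 */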
	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
 * to the guest, but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

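	/* copy guest gprs 14 and 15 (16 bytes) into the SIE block's gg14 area */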
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

1922
	vcpu->arch.sie_block->icptcode = 0;
1923 1924 1925
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);
1926

1927 1928 1929
	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
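
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the fields handled by sync_regs()/store_regs() live in the mmap'ed
 * kvm_run structure, so a VMM can change e.g. the prefix without a
 * separate ioctl and just mark it dirty before the next KVM_RUN.
 * "run", "vcpu_fd" and "new_prefix" are assumed.
 *
 *	#include <linux/kvm.h>
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);	// sync_regs() picks it up
 */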

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * Intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler.
		 */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
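
/*
 * Illustrative userspace run loop (a sketch, not part of this file) for
 * the exit codes prepared above; "run" and "vcpu_fd" are assumed.
 *
 *	#include <errno.h>
 *	#include <linux/kvm.h>
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			// intercept the kernel could not handle, see
 *			// run->s390_sieic.icptcode/ipa/ipb
 *			break;
 *		case KVM_EXIT_INTR:
 *			// interrupted by a signal
 *			break;
 *		}
 *	}
 */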

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
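
/*
 * Illustrative userspace trigger (a sketch, not part of this file): the
 * KVM_S390_STORE_STATUS vcpu ioctl reaches this function through
 * kvm_s390_vcpu_store_status() below; "vcpu_fd" is assumed.
 *
 *	#include <linux/kvm.h>
 *
 *	// store at the architected save area (0x1200, see above)
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */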

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
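
/*
 * Illustrative userspace sketch (not part of this file) for the
 * per-vcpu capability handled above; "vcpu_fd" is assumed.
 *
 *	#include <linux/kvm.h>
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,	// flags must stay 0
 *	};
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */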

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
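
/*
 * Illustrative userspace use of KVM_S390_MEM_OP (a sketch, not part of
 * this file); "vcpu_fd" and "buf" are assumed, the guest address is a
 * made-up example.
 *
 *	#include <linux/kvm.h>
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,			// guest logical address
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,				// access register 0
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 */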

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
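
/*
 * Illustrative userspace injection through the KVM_S390_IRQ case above
 * (a sketch, not part of this file); "vcpu_fd" and "src_cpu_addr" are
 * assumed.
 *
 *	#include <linux/kvm.h>
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = src_cpu_addr,	// emitting cpu address
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		perror("KVM_S390_IRQ");
 */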

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1MB). The userland memory may be fragmented
	 * across multiple vmas, and it is fine to mmap() and munmap() in
	 * this slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
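
/*
 * Illustrative userspace setup that satisfies the 1MB checks above (a
 * sketch, not part of this file); "vm_fd" is assumed and "backing" is a
 * hypothetical 1MB-aligned mmap() result.
 *
 *	#include <linux/kvm.h>
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 1UL << 20,	// multiple of 1MB
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */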

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");