/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

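/*
 * VCPU statistics exported via debugfs: each entry maps a file name to
 * the offset of a counter in struct kvm_vcpu.
 */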
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

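/*
 * Report which capabilities this VM supports. For quantitative
 * capabilities the returned value is the limit itself, e.g. the maximum
 * KVM_S390_MEM_OP transfer size or the number of SCA cpu slots.
 */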
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

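/*
 * Propagate dirtiness from the host page tables into the memslot's
 * dirty bitmap, one guest frame at a time.
 */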
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

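/* Query VM memory attributes; only the guest memory limit is readable. */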
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

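	/* kick all VCPUs out of SIE so that the new wrapping keys are used */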
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

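/*
 * The guest TOD clock is handled as a high part (epoch extension, which
 * currently must be zero) and a low part (the 64-bit TOD base).
 */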
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

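/*
 * Read the guest's storage keys into a userspace buffer, resolving each
 * guest frame through its host virtual address.
 */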
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

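	/* facility 12 indicates the AP query configuration (QCI) facility */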
	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
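	/*
	 * Stagger the basic SCA inside its page in 16-byte steps, presumably
	 * so that the SCAs of different VMs end up on different cache lines.
	 */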
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

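/*
 * Replace the basic SCA by an extended SCA: all VCPUs are blocked and the
 * SCA is swapped under the write lock, so no VCPU can run with a stale
 * SCA pointer.
 */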
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

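/*
 * Called whenever this VCPU's thread is scheduled in: save the host FPU
 * and access registers, install the guest's, and resume CPU timer
 * accounting.
 */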
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}

}

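/* Mirror the VM-wide key-wrapping configuration into the VCPU's SIE block. */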
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

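/*
 * CMMA interpretation uses the CBRL page pointed to by cbrlo, where SIE
 * collects the guest page addresses affected by interpreted ESSA.
 */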
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

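	/*
	 * 0x80 enables CMMA interpretation and 0x08 turns off PFMF
	 * interpretation (the ECB2_CMMA/ECB2_PFMFI bits of later kernels).
	 */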
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

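/*
 * First-time setup of the SIE control block: CPU state flags, execution
 * control bits derived from the available facilities, CMMA and the
 * clock-comparator wakeup timer.
 */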
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

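/*
 * gmap IPTE notifier callback: if an invalidated page is a VCPU's prefix
 * page, that VCPU must reload its prefix mapping before reentering SIE.
 */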
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
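
/*
 * Userspace reaches the two handlers above through the generic ONE_REG
 * ioctls. A minimal sketch of a (hypothetical) caller setting the CPU
 * timer could look like:
 *
 *	__u64 val = 42;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * reg.addr always carries a userspace pointer, which is why the handlers
 * use get_user()/put_user().
 */
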
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}
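
/*
 * Background for the conversion above: with the vector facility, FPRs 0-15
 * are architecturally the leftmost 64 bits of VRs 0-15. The host therefore
 * keeps only the combined vector state, and
 * convert_fp_to_vx()/convert_vx_to_fp() move the FPR values into and out of
 * the high halves of the vxrs array.
 */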

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
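
/*
 * The epoch arithmetic above follows from how SIE presents the clock to
 * the guest:
 *
 *	guest_tod = host_tod + epoch
 *
 * so a requested guest TOD value is realized by storing
 * epoch = tod - get_tod_clock() and propagating it to every vcpu while all
 * of them are blocked, keeping the guest's view of time consistent across
 * cpus.
 */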

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction, as DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
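
/*
 * For reference on the single-byte read above: on s390 the two leftmost
 * bits of the first opcode byte encode the instruction length, i.e.
 * 00 -> 2 bytes, 01/10 -> 4 bytes and 11 -> 6 bytes, which is what
 * insn_length() evaluates. One byte is therefore enough to forward the PSW.
 */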

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and
		 * guest_exit.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc)  {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 * copying in vcpu load/put. Let's update our copies before we save
 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
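
/*
 * Size and alignment of the additional save area: 32 vector registers of
 * 16 bytes each give the 512 bytes written above, and since only bits 0-53
 * of the address participate in address formation the area is effectively
 * 1K-aligned (hence the ~0x3ff masking).
 */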

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save them into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
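
/*
 * Taken together, kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() keep a
 * simple invariant: IBS is enabled iff exactly one vcpu is started.
 * Starting a second vcpu disables IBS everywhere; stopping down to a
 * single remaining vcpu re-enables it for that vcpu.
 */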

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
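
/*
 * Sketch of a (hypothetical) userspace read through the interface above,
 * assuming vcpu_fd is an open vcpu file descriptor:
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY set, no data is copied; only the access
 * check via check_gva_range() is performed.
 */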

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment (1 MB) boundary. The memory in userland may be
	   fragmented into various different vmas. It is okay to mmap()
	   and munmap() memory in this slot after doing this call at any
	   time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
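
/*
 * On the magic constant above: 0xfffff is 2^20 - 1, so both checks enforce
 * 1 MB (segment) alignment of the userspace address and the slot size,
 * matching the comment at the top of the function.
 */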

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");