/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

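/*
 * Per-vcpu statistics exported through debugfs. VCPU_STAT() records the
 * offset of a counter inside struct kvm_vcpu so that the generic KVM code
 * can read the value for each vcpu.
 */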
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

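/* facility list presented to guests, set up in kvm_s390_init() */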
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

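/*
 * Create a virtual machine: enable SIE for the host process, allocate the
 * system control area (SCA) that holds the vcpus' SIE control block
 * pointers, and register a per-VM s390 debug feature for tracing.
 */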
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

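/*
 * Free a vcpu: clear its entry in the SCA before releasing the SIE control
 * block and the vcpu structure itself.
 */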
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

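/*
 * Swap floating point and access registers between host and guest when a
 * vcpu is scheduled in (load) and scheduled out (put).
 */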
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP (Principles of Operation),
	 * but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

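/*
 * Initial setup of the SIE control block: z/Architecture mode, interception
 * controls, the facility list presented to the guest and the clock
 * comparator timer used to wake up an idle vcpu.
 */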
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

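/*
 * Create a vcpu: allocate its SIE control block, enter it into the SCA slot
 * for this cpu id and wire the local interrupt state up to the per-VM
 * floating interrupt list.
 */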
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

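/*
 * Execute the vcpu once: give the scheduler and the machine check handler a
 * chance to run, deliver pending interrupts to the guest and enter SIE via
 * sie64a(); the resulting intercept code is evaluated by the caller.
 */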
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

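/*
 * Main run loop: process pending vcpu requests, take the entry PSW from
 * kvm_run, enter SIE and handle intercepts in the kernel until an intercept
 * needs userspace, a signal arrives or an error occurs, then report the
 * result back through kvm_run.
 */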
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

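/*
 * Copy data into guest storage, either with prefixing applied or via
 * absolute addressing, depending on how the save area address was
 * specified.
 */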
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

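/*
 * vcpu ioctls: interrupt injection, store status, setting the initial PSW
 * and the initial cpu reset.
 */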
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which has
	   to start at guest virtual zero, be located at a page boundary in
	   userland and end at a page boundary. The memory in userland may be
	   fragmented into various different vmas. It is okay to mmap() and
	   munmap() stuff in this slot after doing this call at any time. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);