/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

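/* VCPU_STAT maps a counter name to the offset of the matching field in
 * struct kvm_vcpu; the entries below are exported through KVM's debugfs
 * directory (typically /sys/kernel/debug/kvm/). */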
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* facility list presented to guests: allocated in kvm_s390_init() and
 * masked down there to the facilities known to work in KVM */
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
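
/*
 * Hypothetical userspace sketch (not part of this file): the
 * KVM_S390_ENABLE_SIE ioctl above is issued on the /dev/kvm fd to convert
 * the calling process's address space for SIE, e.g.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */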

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390 yet; this stub reports success without
 * touching the log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
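
/*
 * Hypothetical userspace sketch (not part of this file): injecting a
 * floating service-signal interrupt into the VM, assuming sccb_addr holds
 * the external interruption parameter:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = sccb_addr,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */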

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		/* user controlled vcpus get their own address space (gmap) */
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | KVM_SYNC_GPRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals the initial cpu reset in the POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
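	/* architected reset values for cr0 and cr14, per the initial CPU
	 * reset in the Principles of Operation */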
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	/* the clock-comparator timer wakes a waiting vcpu; the actual
	 * wakeup work runs in a tasklet */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

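/*
 * The system control area (SCA) links the vcpus of a VM for the SIE
 * instruction: each sda entry points at a vcpu's SIE control block, and
 * the mcn bitmask marks which cpu slots are in use (bit 63 - id, matching
 * the set_bit() below).
 */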
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

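/*
 * The run loop below re-enters SIE via __vcpu_run() until an intercept
 * has to be handled in userspace (-EOPNOTSUPP / -EREMOTE), a signal is
 * pending, or an error occurs.
 */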
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * there are two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
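
/*
 * Hypothetical userspace sketch (not part of this file): the target
 * address is passed as the raw ioctl argument, e.g.
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */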

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

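/*
 * For user controlled VMs, userspace can mmap() the vcpu fd at
 * KVM_S390_SIE_PAGE_OFFSET to get at the hardware SIE control block.
 */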
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks: we allow exactly one memory slot, which has
	 * to start at guest virtual zero, be located at a page boundary in
	 * userland and end at a page boundary. The memory in userland may
	 * be fragmented into various different vmas. It is okay to mmap()
	 * and munmap() in this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);