/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

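/*
 * Facility list handed to guests via the SIE block (see
 * kvm_arch_vcpu_setup()); allocated and masked in kvm_s390_init().
 */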
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

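/*
 * Report which optional KVM capabilities are available on s390;
 * anything not listed here is reported as absent.
 */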
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

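/*
 * Create the architecture specific part of a VM: allocate the system
 * control area (SCA), register an s390 debug feature for tracing,
 * initialize the floating interrupt list and, unless this is a
 * user controlled (ucontrol) VM, allocate the guest address space (gmap).
 */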
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

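/*
 * Release a vcpu: drop its SCA entry (non-ucontrol guests), free a
 * ucontrol guest's private gmap and free the SIE control block.
 */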
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
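/*
 * A ucontrol vcpu gets its own private gmap, ordinary vcpus share the
 * per-VM gmap.  Also announce which register sets are transferred via
 * the synchronous register interface in struct kvm_run.
 */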
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

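/*
 * kvm_arch_vcpu_load/put switch the floating point and access registers
 * between host and guest values and enable/disable the guest address
 * space when a vcpu is scheduled in or out.
 */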
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

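/*
 * Set up the initial SIE control block state: cpu flags, the
 * interception controls (ecb/eca), the facility list pointer and the
 * clock comparator timer used to wake up a waiting vcpu.
 */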
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

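/*
 * Allocate a vcpu and its SIE control block, wire it into the SCA
 * (unless this is a ucontrol guest) and into the floating interrupt
 * bookkeeping, then hand it over to the generic kvm code.
 */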
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
479 480 481 482 483 484 485 486 487 488 489 490 491 492
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

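/*
 * Perform one guest entry: handle rescheduling and pending machine
 * checks, deliver pending interrupts, enter SIE via sie64a() and, if the
 * SIE instruction itself faulted, either flag a ucontrol intercept or
 * inject an addressing exception into the guest.
 */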
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

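/*
 * Main vcpu run loop: transfer the synchronous registers from struct
 * kvm_run into the SIE block, run the guest and handle intercepts until
 * a signal arrives or userspace intervention is needed, then copy the
 * state back into struct kvm_run.
 */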
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

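/*
 * Helper for kvm_s390_vcpu_store_status(): copy n bytes to the guest,
 * honouring the prefix register when prefix is set, otherwise to guest
 * absolute storage.
 */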
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

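/*
 * vcpu specific ioctls: interrupt injection, store status, setting the
 * initial psw, initial cpu reset, the ucontrol address space mappings
 * and resolving guest address faults.
 */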
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

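/*
 * Let userspace of a ucontrol guest mmap the vcpu's SIE control block
 * at KVM_S390_SIE_PAGE_OFFSET; all other accesses get SIGBUS.
 */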
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

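/*
 * Module init: register with the generic kvm code, then build the
 * facility list exposed to guests from a masked copy of the host's
 * stfle facility bits.
 */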
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);