/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
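
/*
 * Userspace usage sketch (illustrative only, not part of this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 *
 * s390_enable_sie() converts the calling process' address space for use
 * with the SIE instruction, so this is typically done once, early in the
 * life of the process.
 */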

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
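
/*
 * These capabilities are queried from userspace with the generic
 * KVM_CHECK_EXTENSION ioctl; a hypothetical probe for the ucontrol VM
 * type (fd names are illustrative):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_UCONTROL) == 1)
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 */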

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
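	/*
	 * For non-ucontrol guests, detach the vcpu from the shared SCA:
	 * clear its bit in the cpu mask and, if the SCA entry still points
	 * at our SIE control block, clear the descriptor address too.
	 */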
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
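	/* CR0 and CR14 get their architected initial-reset values (external
	 * interrupt subclass masks and machine check controls, see the
	 * Principles of Operation) */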
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
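	/* the facility list was allocated with GFP_DMA (below 2 GB) in
	 * kvm_s390_init(), so its address fits the 32-bit fac field */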
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
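
/*
 * Hypothetical userspace use of the ONE_REG accessors above (vcpu_fd and
 * the variable names are illustrative):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */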

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
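		/* run the guest until a signal is pending or an intercept
		 * has to be completed in userspace */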
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;
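	/*
	 * prefix selects the gaccess flavour used by the __guestcopy()
	 * calls below: non-zero copies via copy_to_guest() (subject to
	 * prefixing), zero via copy_to_guest_absolute().
	 */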

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which has
	   to start at guest virtual zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into various different vmas.
	   It is okay to mmap() and munmap() stuff in this slot after doing
	   this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);