/*
 * kvm-s390.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
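
/*
 * Each entry above exposes a counter from struct kvm_vcpu.stat as a
 * debugfs file; VCPU_STAT() records the member's offset and marks it as
 * a per-vcpu statistic so the generic KVM code can sum the values over
 * all vcpus. Handlers simply increment the counters, e.g.
 * vcpu->stat.exit_userspace++ in kvm_arch_vcpu_ioctl_run() below.
 */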

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
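
/*
 * Illustrative userspace sketch, not part of this file: the ioctl above
 * is issued on the /dev/kvm fd, and should happen while the process is
 * still single-threaded since s390_enable_sie() reworks the page tables
 * of current->mm. Assumes the usual <fcntl.h>, <sys/ioctl.h> and
 * <linux/kvm.h> userspace environment.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (kvm_fd < 0 || ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		exit(1);
 */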

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
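
/*
 * A minimal probe sketch, not part of this file: userspace checks these
 * capabilities on the /dev/kvm fd before relying on them. A return of 1
 * for KVM_CAP_S390_PSW means psw_mask/psw_addr are valid in struct kvm_run.
 *
 *	int have_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 */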

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
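
/*
 * Illustrative userspace sketch, not part of this file: injecting a
 * floating interrupt (here a virtio notification; "param" is a
 * hypothetical payload) through the VM fd.
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type	= KVM_S390_INT_VIRTIO,
 *		.parm	= 0,
 *		.parm64	= param,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */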

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}
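
/*
 * kvm_arch_vcpu_load/put lazily swap the floating point and access
 * registers between host and guest context, and enable/disable the
 * guest address space (gmap) for the time the vcpu runs on this cpu.
 */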

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
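
/*
 * A minimal userspace sketch with illustrative values, not part of this
 * file: reset the vcpu and set the PSW it starts from before the first
 * KVM_RUN. The mask below merely exemplifies a 64-bit addressing mode
 * PSW; real users derive mask and address from the guest image.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = 0x10000,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */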

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
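
/*
 * Illustrative userspace run loop, not part of this file; handle_sieic()
 * is a hypothetical helper and "run" is the mmap()ed struct kvm_run of
 * the vcpu. Intercepts the kernel cannot handle come back as
 * KVM_EXIT_S390_SIEIC with icptcode/ipa/ipb filled in above.
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */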

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
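
/*
 * Illustrative userspace sketch, not part of this file: store the
 * architected status of a (stopped) vcpu at the absolute save area,
 * e.g. when dumping a guest.
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */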

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
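
/*
 * A registration sketch with hypothetical "ram"/"ram_size" variables, not
 * part of this file. Note that the 0xffffful masks above actually enforce
 * 1 MB (segment) alignment, not just page alignment.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot		 = 0,
 *		.flags		 = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = ram_size,
 *		.userspace_addr	 = (__u64) ram,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */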

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);