/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
#include <asm/sections.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);

static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	BUG_ON(preemptible());
	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}

/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	BUG_ON(preemptible());
	return __this_cpu_read(kvm_arm_running_vcpu);
}

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
	return &kvm_arm_running_vcpu;
}
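
/*
 * Illustrative use of the helpers above (a sketch, not a new code path):
 * code that must act on the vcpu currently loaded on this physical CPU,
 * such as an interrupt-injection fast path, would do:
 *
 *	preempt_disable();
 *	vcpu = kvm_arm_get_running_vcpu();
 *	if (vcpu)
 *		...;
 *	preempt_enable();
 *
 * The BUG_ON(preemptible()) checks enforce that callers disable
 * preemption, so the per-CPU lookup cannot race with vcpu migration.
 */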

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}


/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret, cpu;

	if (type)
		return -EINVAL;

	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
	if (!kvm->arch.last_vcpu_ran)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);
	kvm_timer_init(kvm);

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = vgic_present ?
				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
	return ret;
}
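
/*
 * last_vcpu_ran, allocated above, records per physical CPU the vcpu_id
 * of the last vcpu of this VM that ran there; kvm_arch_vcpu_load()
 * consults it to decide when a local VMID TLB flush is required.
 */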

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}

	kvm_vgic_destroy(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	default:
		r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}


struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
		err = -EBUSY;
		goto out;
	}

	if (id >= kvm->arch.max_vcpus) {
		err = -EINVAL;
		goto out;
	}

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	kvm_vgic_vcpu_early_init(vcpu);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_should_fire(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	kvm_timer_schedule(vcpu);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_timer_unschedule(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int *last_ran;

	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	kvm_arm_set_running_vcpu(vcpu);
}
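
/*
 * Note on the last_vcpu_ran check above: vcpus of the same VM share a
 * VMID, so TLB entries cached while one vcpu ran on this CPU could be
 * hit by a different vcpu of that VM with different stage 1
 * translations; flushing this CPU's entries for the VMID before a
 * different vcpu runs avoids that.
 */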

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/*
	 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
	 * if the vcpu is no longer assigned to a cpu.  This is used for the
	 * optimized make_all_cpus_request path.
	 */
	vcpu->cpu = -1;

	kvm_arm_set_running_vcpu(NULL);
	kvm_timer_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu->arch.power_off = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}
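
/*
 * force_vm_exit() relies on the IPI itself: delivering the cross-call
 * interrupt to a CPU that is executing a guest forces an exit so the
 * host can service it; the empty callback intentionally does nothing.
 */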

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM whose VMID generation to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a
 * VMID for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
	kvm->arch.vttbr = pgd_phys | vmid;

	spin_unlock(&kvm_vmid_lock);
}
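
/*
 * Worked example of the generation scheme above, assuming 8-bit VMIDs:
 * generation 1 hands out VMIDs 1..255 (zero stays reserved).  The next
 * allocation wraps kvm_next_vmid to 0, so the allocator bumps
 * kvm_vmid_gen, forces all guests out and flushes the TLBs; every VM
 * then takes the update_vttbr() slow path again and receives a fresh
 * VMID from generation 2.
 */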

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Map the VGIC hardware resources before running a vcpu the first
	 * time on this VM.
	 */
	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	/*
	 * Enable the arch timers only if we have an in-kernel VGIC
	 * and it has been properly initialized, since we cannot handle
	 * interrupts from the virtual timer with a userspace gic.
	 */
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		ret = kvm_timer_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
}

void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	vcpu->arch.pause = false;
	swake_up(wq);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arm_resume_vcpu(vcpu);
}

static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
				       (!vcpu->arch.pause)));
}
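
/*
 * vcpu_sleep() returns once another thread clears power_off/pause and
 * wakes the queue, e.g. via kvm_arm_resume_vcpu() or a PSCI CPU_ON
 * issued by another vcpu; the run loop then re-checks its entry
 * conditions.
 */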

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.power_off || vcpu->arch.pause)
			vcpu_sleep(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();
		kvm_pmu_flush_hwstate(vcpu);
		kvm_timer_flush_hwstate(vcpu);
		kvm_vgic_flush_hwstate(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}
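
		/*
		 * The flush/sync calls bracket the world switch: pmu,
		 * timer and vgic state flushed into hardware above must
		 * be synced back out after the guest runs, including on
		 * the abort path below that bails out before entering
		 * the guest.
		 */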

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
			vcpu->arch.power_off || vcpu->arch.pause) {
			local_irq_enable();
			kvm_pmu_sync_hwstate(vcpu);
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest.  We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that is not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/*
		 * We must sync the PMU and timer state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);
		kvm_timer_sync_hwstate(vcpu);

		kvm_vgic_sync_hwstate(vcpu);

		preempt_enable();

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
	}

	return -EINVAL;
}
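
/*
 * Illustration of the irq_level->irq encoding decoded above: userspace
 * packs (type << KVM_ARM_IRQ_TYPE_SHIFT) |
 * (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT) |
 * (irq_num << KVM_ARM_IRQ_NUM_SHIFT), so asserting the IRQ line of
 * vcpu 2 uses KVM_ARM_IRQ_TYPE_CPU, vcpu_idx 2 and irq_num
 * KVM_ARM_IRQ_CPU_IRQ with level = 1.
 */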

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	return kvm_reset_vcpu(vcpu);
}


static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 */
	if (vcpu->arch.has_run_once)
		stage2_unmap_vm(vcpu->kvm);

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu->arch.power_off = true;
	else
		vcpu->arch.power_off = false;

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_set_attr(vcpu, &attr);
	}
	case KVM_GET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_get_attr(vcpu, &attr);
	}
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_has_attr(vcpu, &attr);
	}
	default:
		return -EINVAL;
	}
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed  and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static void cpu_init_hyp_mode(void *dummy)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	pgd_ptr = kvm_mmu_get_httbr();
	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);

	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
	__cpu_init_stage2();

	kvm_arm_init_debug();
}
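
/*
 * The sequence above hands this CPU's EL2 over from the hyp stub to
 * KVM: install the HYP init vector, then initialise EL2 with the hyp
 * page-table base, a per-CPU hyp stack and the world-switch vector.
 */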

static void cpu_hyp_reinit(void)
{
	if (is_kernel_in_hyp_mode()) {
		/*
		 * __cpu_init_stage2() is safe to call even if the PM
		 * event was cancelled before the CPU was reset.
		 */
		__cpu_init_stage2();
	} else {
		if (__hyp_get_vectors() == hyp_default_vectors)
			cpu_init_hyp_mode(NULL);
	}
}
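
/*
 * On VHE systems (kernel running at EL2) only stage 2 needs setting up
 * again after a power event; on non-VHE systems, finding the default
 * stub vectors means EL2 state was lost and a full cpu_init_hyp_mode()
 * is required.
 */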

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__cpu_reset_hyp_mode(hyp_default_vectors,
				     kvm_get_idmap_start());
}

static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}
#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static void teardown_common_resources(void)
{
	free_percpu(kvm_host_cpu_state);
}

static int init_common_resources(void)
{
	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
	if (!kvm_host_cpu_state) {
		kvm_err("Cannot allocate host CPU state\n");
		return -ENOMEM;
	}

	/* set size of VMID supported by CPU */
	kvm_vmid_bits = kvm_get_vmid_bits();
	kvm_info("%d-bit VMID\n", kvm_vmid_bits);

	return 0;
}

static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init();
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}

static void teardown_hyp_mode(void)
{
	int cpu;

	if (is_kernel_in_hyp_mode())
		return;

	free_hyp_pgds();
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
	hyp_cpu_pm_exit();
}

static int init_vhe_mode(void)
{
	kvm_info("VHE mode initialized successfully\n");
	return 0;
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	for_each_possible_cpu(cpu) {
		kvm_cpu_context_t *cpu_ctxt;

		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);

		if (err) {
			kvm_err("Cannot map host CPU state: %d\n", err);
			goto out_err;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
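
/*
 * kvm_mpidr_to_vcpu() backs guest operations that identify a CPU by
 * its MPIDR affinity bits, e.g. resolving the target of a PSCI CPU_ON
 * call.
 */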

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	if (is_kernel_in_hyp_mode())
		err = init_vhe_mode();
	else
		err = init_hyp_mode();
	if (err)
		goto out_err;

	err = init_subsystems();
	if (err)
		goto out_hyp;

	return 0;

out_hyp:
	teardown_hyp_mode();
out_err:
	teardown_common_resources();
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);