// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
		return ret;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	bitmap_free(kvm->arch.pmu_filter);

	kvm_vgic_destroy(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
		r = 1;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->arch.max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	default:
		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

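/* Mark the vCPU as powered off and kick it so it goes to sleep. */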
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

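/* Report whether the vCPU was executing in a privileged (kernel) mode. */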
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
	}
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) &&(!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
	}
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest.  We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that is not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}

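/*
 * Latch a virtual IRQ or FIQ line for a vCPU without an in-kernel irqchip
 * by setting or clearing the corresponding HCR VI/VF bit.
 */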
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i, ret;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu->arch.has_run_once) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

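/* Size of the nVHE hypervisor's per-CPU data section in the hyp image. */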
static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

static int kvm_map_vectors(void)
{
	/*
	 * SV2  = ARM64_SPECTRE_V2
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !SV2 + !HEL2 -> use direct vectors
	 *  SV2 + !HEL2 -> use hardened vectors in place
	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}

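/*
 * Initialize EL2 on the calling CPU: switch from the hyp stub to KVM's init
 * vectors and issue the init hypercall with this CPU's PGD, stack, vectors
 * and per-CPU offset.
 */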
static void cpu_init_hyp_mode(void)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long vector_ptr;
	unsigned long tpidr_el2;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	pgd_ptr = kvm_mmu_get_httbr();
	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
			  pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

static void cpu_hyp_reinit(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	cpu_hyp_reset();

	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();
	else
		cpu_init_hyp_mode();

	kvm_arm_init_debug();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static int init_common_resources(void)
{
	return kvm_set_ipa_limit();
}

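/*
 * Bring up the subsystems that need EL2 access (vGIC, arch timer, perf,
 * coproc tables), with the hardware temporarily enabled on each CPU.
 */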
static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU lower-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}

static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
	}
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	/*
	 * Map Hyp percpu pages
	 */
	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (in_hyp_mode)
		kvm_info("VHE mode initialized successfully\n");
	else
		kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);