/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <asm/smp_plat.h>
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>


#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
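
/*
 * Illustrative sketch, not part of this header: these requests are
 * raised with kvm_make_request() and consumed in the vcpu run loop
 * via kvm_check_request(), e.g. when the vgic injects an interrupt
 * from another CPU:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 */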

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

static inline int kvm_arm_init_sve(void) { return 0; }

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};
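
/*
 * Illustrative sketch (the real check lives in virt/kvm/arm/arm.c):
 * vmid_gen is compared against a global generation counter, so stale
 * VMIDs can be detected in O(1) on the next vcpu entry:
 *
 *	static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 *	{
 *		u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
 *		smp_rmb();	// orders the two reads
 *		return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 *	}
 */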

struct kvm_arch {
	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	struct kvm_vmid vmid;

	/* Stage-2 page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Interrupt controller */
	struct vgic_dist	vgic;
	int max_vcpus;

	/* Mandated version of PSCI */
	u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
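
/*
 * Illustrative sketch, assuming the helper names used in
 * virt/kvm/arm/mmu.c: the cache is topped up with GFP_KERNEL
 * allocations before kvm->mmu_lock is taken, so the fault path can
 * then pop pages without sleeping:
 *
 *	ret = mmu_topup_memory_cache(cache, min, KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	pte = mmu_memory_cache_alloc(cache);
 */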

struct kvm_vcpu_fault_info {
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	c0_MPIDR,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control R. */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status R */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status R */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (incl. invalid) */
};

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	struct vfp_hard_struct vfp;
	u32 cp15[NR_CP15_REGS];
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

typedef struct kvm_host_data kvm_host_data_t;

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
					     int cpu)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
}

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* The CPU type we expose to the VM */
	u32 midr;

	/* HYP trapping configuration */
	u32 hcr;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Host FP context */
	struct kvm_cpu_context *host_cpu_context;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	struct vcpu_reset_state reset_state;

	/* Detect first run of a vcpu */
	bool has_run_once;
};

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
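
/*
 * Example (illustrative): coprocessor emulation reads and writes the
 * shadow cp15 state through this accessor, indexed by enum vcpu_sysreg:
 *
 *	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 *	vcpu_cp15(vcpu, c13_CID) = val;
 */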

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long __kvm_call_hyp(void *hypfn, ...);

/*
 * The has_vhe() part doesn't get emitted, but is used for type-checking.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
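
/*
 * Example (illustrative): a void hyp function is invoked with
 * kvm_call_hyp(), one with a return value with kvm_call_hyp_ret():
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */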

void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
				     int exception_index) {}

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Call initialization code, and switch to the full blown HYP
	 * code. The init code doesn't need to preserve these
	 * registers as r0-r3 are already callee saved according to
	 * the AAPCS.
	 * Note that we slightly misuse the prototype by casting the
	 * stack pointer to a void *.
	 *
	 * The PGDs are always passed as the third argument, in order
	 * to be passed into r2-r3 to the init code (yes, this is
	 * compliant with the PCS!).
	 */

	__kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}

static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	return 0;
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/*
 * VFP/NEON switching is all done by the hyp switch code, so no need to
 * coordinate with host context handling for this state:
 */
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

static inline bool kvm_arm_harden_branch_predictor(void)
{
	switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_CORTEX_A17:
		return true;
#endif
	default:
		return false;
	}
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL		1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}
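
/*
 * Illustrative sketch: shared code (e.g. the SMCCC_ARCH_WORKAROUND_2
 * firmware register handling in virt/kvm/arm/psci.c) switches on this
 * value to report the Spectre-v4 mitigation state to userspace:
 *
 *	switch (kvm_arm_have_ssbd()) {
 *	case KVM_SSBD_FORCE_ENABLE:
 *	case KVM_SSBD_MITIGATED:
 *		...	// mitigation active or not needed
 *		break;
 *	case KVM_SSBD_UNKNOWN:
 *	default:
 *		return -ENOENT;
 *	}
 */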

static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	/*
	 * On 32-bit ARM, VMs get a static 40-bit IPA stage2 setup,
	 * so any non-zero value used as type is illegal.
	 */
	if (type)
		return -EINVAL;
	return 0;
}
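
/*
 * Illustrative: "type" is the machine type argument that userspace
 * passes to KVM_CREATE_VM. arm64 can encode a requested IPA size in
 * it (see KVM_VM_TYPE_ARM_IPA_SIZE), but here only 0 is accepted:
 *
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */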

static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	return -EINVAL;
}

static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* __ARM_KVM_HOST_H__ */