/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>

#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS

#define KVM_REQ_VCPU_EXIT	8

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_arch {
	/* VTTBR value associated with below pgd and vmid */
	u64    vttbr;

	/* Timer */
	struct arch_timer_kvm	timer;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;

	/* Stage-2 page table */
	pgd_t *pgd;

	/* Interrupt controller */
	struct vgic_dist	vgic;
	int max_vcpus;
};
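/*
 * Illustrative only (the real update lives in the KVM core code, and the
 * constant is assumed): on a VMID rollover, vttbr is recomputed from the
 * stage-2 pgd physical address and the new VMID, roughly as
 *
 *	kvm->arch.vttbr = pgd_phys | ((u64)kvm->arch.vmid << VTTBR_VMID_SHIFT);
 *
 * with VTTBR_VMID_SHIFT being 48 on ARMv7 with LPAE.
 */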

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
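/*
 * Illustrative sketch, not the kernel's actual mmu code: the cache is
 * topped up while it is still safe to sleep, and objects are then popped
 * from it with the mmu lock held, so the fault path never allocates
 * under the spinlock:
 *
 *	while (cache->nobjs < KVM_NR_MEM_OBJS)
 *		cache->objects[cache->nobjs++] =
 *			(void *)__get_free_page(GFP_KERNEL);
 *	...
 *	obj = cache->objects[--cache->nobjs];	(lock held; cannot fail)
 */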

struct kvm_vcpu_fault_info {
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */
};
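/*
 * Example (illustrative): the exception class is encoded in HSR[31:26],
 * so an exit handler can dispatch on
 *
 *	u8 ec = (vcpu->arch.fault.hsr >> 26) & 0x3f;
 */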

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	c0_MPIDR,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control R. */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status R */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status R */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (incl. invalid) */
};

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	struct vfp_hard_struct vfp;
	u32 cp15[NR_CP15_REGS];
};

typedef struct kvm_cpu_context kvm_cpu_context_t;

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* The CPU type we expose to the VM */
	u32 midr;

	/* HYP trapping configuration */
	u32 hcr;

	/* Interrupt related fields */
	u32 irq_lines;		/* IRQ and FIQ levels */

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Host FP context */
	kvm_cpu_context_t *host_cpu_context;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Detect first run of a vcpu */
	bool has_run_once;
};

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_poll_invalid;
	u32 halt_wakeup;
	u32 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
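/*
 * Example (illustrative): guest cp15 state is indexed by enum vcpu_sysreg
 * through the accessor above, e.g. for the System Control Register:
 *
 *	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 *	vcpu_cp15(vcpu, c1_SCTLR) = sctlr | 0x1;	(sets SCTLR.M, the MMU enable bit)
 */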

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* We do not have shadow page tables, hence the empty hooks */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
							 unsigned long address)
{
}

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
				       phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Call initialization code, and switch to the full-blown HYP
	 * code. The init code doesn't need to preserve these
	 * registers, as r0-r3 are already caller-saved according to
	 * the AAPCS.
	 * Note that we slightly misuse the prototype by casting the
	 * stack pointer to a void *.
	 *
	 * We don't have enough registers to perform the full init in
	 * one go.  Install the boot PGD first, and then install the
	 * runtime PGD, stack pointer and vectors. The PGDs are always
	 * passed as the third argument, in order to be passed into
	 * r2-r3 to the init code (yes, this is compliant with the
	 * PCS!).
	 */

	kvm_call_hyp(NULL, 0, boot_pgd_ptr);

	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}

static inline void __cpu_reset_hyp_mode(phys_addr_t phys_idmap_start)
{
	/*
	 * TODO
	 * kvm_call_reset(phys_idmap_start);
	 */
}

static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
	return 0;
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
					     struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
					     struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
					     struct kvm_device_attr *attr)
{
	return -ENXIO;
}

#endif /* __ARM_KVM_HOST_H__ */