/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
28
	u64 raw;
A
Alexander Graf 已提交
29 30 31 32 33
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
A
Alexander Graf 已提交
34 35
	bool vs		: 1;
	bool vp		: 1;
A
Alexander Graf 已提交
36 37 38 39 40 41
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
A
Alexander Graf 已提交
42
	bool valid	: 1;
A
Alexander Graf 已提交
43 44 45 46 47 48
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
61 62 63
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
64
	struct rcu_head rcu_head;
65
	u64 host_vpn;
66 67 68
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
69
	int pagesize;
70 71
};

72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88
/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
89
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
90 91 92 93 94 95 96 97 98
	struct list_head preempt_list;
	spinlock_t lock;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
99
	u64 tb_offset_applied;	/* timebase offset currently in force */
100 101 102 103
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
104
	ulong vtb;		/* virtual timebase */
105
	ulong conferring_threads;
106
	unsigned int halt_poll_ns;
107
	atomic_t online_count;
108 109
};

A
Alexander Graf 已提交
110 111 112 113 114 115 116 117 118 119
struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
A
Alexander Graf 已提交
120
	u64 gqr[8];
A
Alexander Graf 已提交
121 122 123
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
124
	u64 vtb;
125 126
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
127
	u32 vsid_next;
128
#else
129 130 131
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
132 133
#endif
	int context_id[SID_CONTEXTS];
134

135 136
	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

137 138 139 140
	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
141 142 143
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
144 145
	int hpte_cache_count;
	spinlock_t mmu_lock;
A
Alexander Graf 已提交
146 147
};

/* Special proto-VSID values / flag bits used by the shadow MMU. */
#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

A
Alexander Graf 已提交
156
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
A
Alexander Graf 已提交
157
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
A
Alexander Graf 已提交
158
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
A
Alexander Graf 已提交
159 160 161
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
162
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
163 164 165
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
A
Alexander Graf 已提交
166
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
167
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
A
Alexander Graf 已提交
168
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
169 170 171 172 173
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
174 175
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);
176 177 178

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
179
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
180 181 182 183 184
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
185
extern int kvmppc_mmu_hv_init(void);
186
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
187

188 189 190
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
191 192
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
193
extern int kvmppc_init_vm_radix(struct kvm *kvm);
194 195 196
extern void kvmppc_free_radix(struct kvm *kvm);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
197 198 199 200 201 202
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
203 204
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
205
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
206

207
/* XXX remove this export when load_last_inst() is generic */
208
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
A
Alexander Graf 已提交
209
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
210 211
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
212
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
213
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
214 215
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
216
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
217
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
D
Dan Williams 已提交
218 219
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
220 221
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
222 223
extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
224
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
225
			unsigned long pte_index);
226
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
227
			unsigned long pte_index);
228 229
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
230 231
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
232 233 234
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
235 236 237
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
238
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
239
			struct kvm_memory_slot *memslot, unsigned long *map);
240 241 242
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
243 244
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
245
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
A
Alexander Graf 已提交
246

247 248 249 250
extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

251
extern void kvmppc_entry_trampoline(void);
252
extern void kvmppc_hv_entry_trampoline(void);
253 254
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
255
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
256
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
257 258
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
259 260
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
261 262 263 264

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
265
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
266
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
267 268 269
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
270
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
271
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
272 273
#endif

274 275
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

276
extern int kvm_irq_bypass;
A
Alexander Graf 已提交
277 278 279

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
280
	return vcpu->arch.book3s;
A
Alexander Graf 已提交
281 282
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

292 293
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
294
	vcpu->arch.regs.gpr[num] = val;
295 296 297 298
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
299
	return vcpu->arch.regs.gpr[num];
300 301 302 303
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
304
	vcpu->arch.regs.ccr = val;
305 306 307 308
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
309
	return vcpu->arch.regs.ccr;
310 311
}

312
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
313
{
314
	vcpu->arch.regs.xer = val;
315 316
}

317
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
318
{
319
	return vcpu->arch.regs.xer;
320 321 322 323
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
324
	vcpu->arch.regs.ctr = val;
325 326 327 328
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
329
	return vcpu->arch.regs.ctr;
330 331 332 333
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
334
	vcpu->arch.regs.link = val;
335 336 337 338
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
339
	return vcpu->arch.regs.link;
340 341 342 343
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
344
	vcpu->arch.regs.nip = val;
345 346 347 348
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
349
	return vcpu->arch.regs.nip;
350 351
}

352
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
353
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
354
{
355
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
356
}
357 358 359

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
360 361 362
	return vcpu->arch.fault_dar;
}

363 364 365 366 367
static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

/* H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE hypercall handlers. */
extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

/* Instruction encodings used by the emulator. */
#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	/* Per-block offset table, indexed as if the guest stride were 8. */
	const int offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int guest_stride = kvm->arch.emul_smt_mode;
	int blk = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / guest_stride);
	u32 packed;

	if (WARN_ONCE(blk >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;

	/* Keep the low ID bits, then add this block's collision-free offset. */
	packed = (id % KVM_MAX_VCPUS) + offsets[blk];
	if (WARN_ONCE(packed >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;

	return packed;
}

#endif /* __ASM_KVM_BOOK3S_H__ */