/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long hva);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to fetch it */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Return KVM_INST_FETCH_FAILED unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
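
/*
 * Illustrative usage sketch (the caller shown is hypothetical; only the
 * names defined in this header are real): an emulation path would
 * typically retry when the instruction could not be fetched:
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	...decode inst, which is now in the expected byte order...
 */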

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Extracts instruction bits using the Power ISA (MSB-0) bit numbering,
 * in which bit 0 is the leftmost (most significant) bit of the 64-bit
 * value. Both msb and lsb are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
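
/*
 * Worked example (illustrative): with a 32-bit instruction held in the
 * low word of a u64, instruction bits n..m sit at doubleword bits
 * n+32..m+32, so the 6-bit primary opcode (instruction bits 0-5) is
 * kvmppc_get_field(inst, 32, 37), i.e. (inst >> 26) & 0x3f.
 */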

/*
 * Replaces instruction bits, using the same MSB-0 bit numbering.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
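
/*
 * Hedged usage sketch (the register id and vcpu field are hypothetical):
 * a ONE_REG get handler would typically do
 *
 *	case KVM_REG_PPC_FOO:
 *		*val = get_reg_val(reg->id, vcpu->arch.foo);
 *		break;
 *
 * where the size encoded in the id selects the union member; a size other
 * than 4 or 8 bytes triggers BUG().
 */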

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

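/*
 * Reading the XICS latch below also clears the cached XIRR in the PACA,
 * so each latched interrupt value is consumed at most once.
 */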
static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}
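
/*
 * Design note on the code above: PG_arch_1 acts as an "icache clean"
 * marker on powerpc, so a given page is flushed here at most once until
 * the flag is cleared again (e.g. when the page is recycled).
 */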

/*
 * Shared struct helpers. The shared struct can be little- or big-endian,
 * depending on the guest endianness, so expose helpers that handle both
 * layouts.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);						\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

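/*
 * For example, SHARED_WRAPPER(sprg4, 64) expands to a pair of accessors,
 * kvmppc_get_sprg4() and kvmppc_set_sprg4(), which read and write
 * vcpu->arch.shared->sprg4 in the byte order selected by
 * kvmppc_shared_big_endian().
 */
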
#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

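/*
 * On booke HV, the guest SPRs listed below (SPRG0-3, SRR0/1, DAR, ESR)
 * are backed by real shadow SPRs (SPRN_GSPRG0 etc.), so these wrappers
 * compile to mfspr/mtspr; on other configurations they fall back to the
 * byteswapping shared-page accessors.
 */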
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It returns the lazy-EE and
 * IRQ-disabled tracking state to normal, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
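
/*
 * Worked example (illustrative): for an indexed-form access such as
 * "lwzx rt,ra,rb", the effective address is GPR[rb] plus GPR[ra] when
 * ra != 0; if the guest is in 32-bit mode (MSR_SF or MSR_CM clear), the
 * result is truncated to 32 bits.
 */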

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */