/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm_vcpu *vcpu, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to do so */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Leave the value (fetch_failed) unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
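
/*
 * Illustrative call pattern (a hedged sketch, not code from this header):
 * an emulation path would typically do
 *
 *	u32 inst;
 *	int ret;
 *
 *	ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *	if (ret != EMULATE_DONE)
 *		return ret;
 *
 * and only decode 'inst' once EMULATE_DONE has been returned.
 */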

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits numbered according to the spec's (IBM) bit ordering,
 * i.e. the leftmost bit is bit 0. Both the msb and lsb bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits, using the same spec (IBM) bit ordering as above.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
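
/*
 * Worked example (for illustration only): with this 0-is-leftmost numbering
 * over a 64-bit value, kvmppc_get_field(inst, 59, 63) extracts the five
 * least-significant bits of inst; kvmppc_set_field() performs the inverse,
 * overwriting the named bit range with a new value.
 */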

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
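
/*
 * Hedged usage sketch ('vcpu->arch.foo' is a placeholder field, not part of
 * this header): a ONE_REG handler would typically build and unpack values as
 *
 *	union kvmppc_one_reg val = get_reg_val(reg->id, vcpu->arch.foo);
 *	...
 *	vcpu->arch.foo = set_reg_val(reg->id, val);
 *
 * with the operand size taken from the KVM_REG_SIZE_* bits of the id.
 */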

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that handle both.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);						\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
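
/*
 * For example, SHARED_WRAPPER(critical, 64) below generates
 * kvmppc_get_critical()/kvmppc_set_critical(), which read and write
 * vcpu->arch.shared->critical using the byte order selected by
 * kvmppc_shared_big_endian().
 */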

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after kvmppc_prepare_to_enter(). This function puts the lazy EE
 * and IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}
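
/*
 * Hedged sketch of the intended call pattern (the actual run loops live
 * elsewhere): once kvmppc_prepare_to_enter() has succeeded and interrupts
 * are hard-disabled, the run loop calls kvmppc_fix_ee_before_entry() and
 * then enters the guest.
 */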

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
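
/*
 * Illustrative caller (a hedged sketch; get_ra()/get_rb() stand in for
 * whatever decode helpers the emulation code uses): an indexed-form
 * load/store emulation would compute its effective address as
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 */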

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */