/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>

/*
 * The Power architecture requires the HPT to be at least 256 kiB
 * and at most 64 TiB.
 */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
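
/*
 * Usage sketch (editor's note; not part of the original header):
 * svcpu_get() disables preemption so the per-CPU shadow vcpu cannot
 * move under us; every svcpu_get() must be paired with svcpu_put().
 *
 *	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 *	...use svcpu...
 *	svcpu_put(svcpu);
 */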
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all data we apply to the PTE, we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "  stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
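
/*
 * Usage sketch (editor's note): a typical HPTE update spins on
 * try_lock_hpte() with HPTE_V_HVLOCK, works on the entry, then
 * releases it with unlock_hpte(), which also publishes the possibly
 * modified first doubleword:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	...inspect or modify the HPTE...
 *	unlock_hpte(hptep, v);
 */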

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
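
/*
 * Worked example (editor's note): a 64kB page in a 64kB base-page
 * segment encodes as kvmppc_pgsize_lp_encoding(16, 16) == 1, which
 * kvmppc_hpte_page_shifts() decodes back to 16; a 16MB page in a 4kB
 * base-page segment encodes as 0x38 and decodes to (24 << 8) + 12,
 * i.e. actual shift 24 over base shift 12.
 */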

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering the segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA field in v has its lower 23 bits cleared. We need to
	 * derive those from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hash.
	 * In v we have the va with 23 bits dropped and then left-shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we therefore
	 * right-shift by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/*  AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed for the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field: bits 58..(77 - base_page_shift) of the va.
		 * We only have space for bits 58..64; the missing bits
		 * must be zero-filled. The +1 takes care of the L bit
		 * shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
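
/*
 * Usage sketch (editor's note): a caller demoting a guest mapping to
 * read-only might do
 *
 *	if (hpte_is_writable(ptel))
 *		ptel = hpte_make_readonly(ptel);
 *
 * after which the PP bits hold one of the read-only encodings
 * (PP_RXRX or PP_RXXX) and hpte_is_writable(ptel) is false.
 */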

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the hptel
	 * is cache-inhibited as well.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}
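
/*
 * Worked example (editor's note): ordinary guest memory must be
 * mapped with WIMG == HPTE_R_M (coherent, cacheable); a SAO mapping
 * (HPTE_R_W | HPTE_R_I | HPTE_R_M) is treated as normal memory on
 * ISA 2.06 and later, while a cache-inhibited host mapping requires
 * HPTE_R_I (and not HPTE_R_W) in the guest HPTE.
 */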

/*
 * If the PTE is present and writable, atomically set the dirty and
 * referenced bits and return the PTE, otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the PTE
		 * atomically.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
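
/*
 * Worked example (editor's note): with the key bit set, only
 * pp == PP_RWRW grants write permission; with no key, any of
 * PP_RWXX, PP_RWRX and PP_RWRW (i.e. pp <= PP_RWRW) allows writing.
 */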

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
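
/*
 * Worked example (editor's note): the storage key is reassembled from
 * HPTE_R_KEY_HI and HPTE_R_KEY_LO; for skey == 5 the matching AMR
 * permission pair sits at bits 62 - 2*5 = 52..53, so the function
 * returns (amr >> 52) & 3.
 */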

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
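
/*
 * Usage sketch (editor's note): the reverse-map chain is protected by
 * a lock bit embedded in the rmap word itself:
 *
 *	lock_rmap(rmap);
 *	...walk or modify the reverse-mapping chain...
 *	unlock_rmap(rmap);
 */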

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
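
/*
 * Worked example (editor's note, assuming 64kB host pages): for a
 * 16MB page size, mask == (0x1000000 >> 16) - 1 == 0xff, so the
 * memslot must start and end on a 16MB boundary.
 */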

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
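
/*
 * Worked example (editor's note): slb_pgsize_encoding(0x10000) (64kB)
 * returns SLB_VSID_L | SLB_VSID_LP_01, slb_pgsize_encoding(0x1000000)
 * (16MB) returns SLB_VSID_L, and slb_pgsize_encoding(0x1000) (4kB)
 * returns 0.
 */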

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
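
/*
 * Worked example (editor's note): with KVM_DEFAULT_HPT_ORDER (24, a
 * 16MB HPT), kvmppc_hpt_npte() yields 1UL << 20 HPTEs and
 * kvmppc_hpt_mask() yields (1UL << 17) - 1, the index mask over the
 * 128-byte HPTE groups.
 */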

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}
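
/*
 * Usage sketch (editor's note): both helpers mark npages pages dirty
 * starting at bit i of a little-endian bitmap; the atomic variant is
 * for callers that may race with concurrent updates to the same map,
 * e.g. (hypothetical caller)
 *
 *	set_dirty_bits_atomic(map, gfn - memslot->base_gfn, npages);
 */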

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link  = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp  = vcpu->arch.fp_tm;
	vcpu->arch.vr  = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm  = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm  = vcpu->arch.fp;
	vcpu->arch.vr_tm  = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */