/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
L
Liu Yu 已提交
44 45
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
46 47 48 49 50 51 52 53 54 55 56 57 58
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

/*
 * Round-robin victim selector for guest TLB0: return the current
 * next-victim way and advance it, wrapping at KVM_E500_TLB0_WAY_NUM.
 */
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int way = vcpu_e500->gtlb_nv[0];

	vcpu_e500->gtlb_nv[0] = way + 1;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return way;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
S
Scott Wood 已提交
68 69
	/* reserve one entry for magic page */
	return tlb1_entry_num - tlbcam_index - 1;
70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94
}

/* Nonzero if the entry grants supervisor or user write permission. */
static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

/*
 * Translate guest MAS3 permission/attribute bits into the value used
 * for the shadow (host) entry.  When the guest runs in supervisor
 * mode, its supervisor permissions are mirrored into the user
 * permission bits; supervisor permissions are always granted so the
 * host kernel can access the mapping.
 */
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	u32 attrib = mas3 & MAS3_ATTRIB_MASK;	/* drop reserved bits */

	if (!usermode) {
		/* Guest is in supervisor mode: translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~E500_TLB_USER_PERM_MASK;
		attrib |= (attrib & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return attrib | E500_TLB_SUPER_PERM_MASK;
}

/*
 * Translate guest MAS2 attribute bits for the shadow entry.  On SMP
 * hosts the M (memory coherence) bit is forced on.  @usermode is
 * accepted for symmetry with e500_shadow_mas3_attrib() but unused.
 */
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
	u32 attrib = mas2 & MAS2_ATTRIB_MASK;

#ifdef CONFIG_SMP
	attrib |= MAS2_M;
#endif
	return attrib;
}

/*
 * writing shadow tlb entry to host TLB
 */
105
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
106
{
107 108 109 110
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
111 112 113 114
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
115 116
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
117 118 119
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
L
Liu Yu 已提交
120
		int tlbsel, int esel, struct tlbe *stlbe)
121 122
{
	if (tlbsel == 0) {
123 124 125
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
126
	} else {
127 128 129
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
130
	}
L
Liu Yu 已提交
131 132
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
133 134
}

S
Scott Wood 已提交
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	magic.mas1 = MAS1_VALID | MAS1_TS |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
}

/* Intentionally empty: no TLB state is restored on vcpu load. */
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
}

/* Drop every shadow mapping from the host TLB when the vcpu is put. */
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/*
 * Search one guest TLB array for a valid entry translating @eaddr.
 *
 * @pid: PID to match; entries with TID == 0 match any PID.
 * @as:  address space to match, or -1 to match either space.
 *
 * Returns the matching index, or -1 if no entry covers @eaddr.
 */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
		unsigned int tid;

		/* Range check first: entry must cover eaddr. */
		if (eaddr < get_tlb_eaddr(tlbe) || eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && tid != pid)
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (as != -1 && get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

L
Liu Yu 已提交
196 197 198
static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
					  struct tlbe *gtlbe,
					  pfn_t pfn)
199
{
L
Liu Yu 已提交
200 201
	priv->pfn = pfn;
	priv->flags = E500_TLB_VALID;
202

L
Liu Yu 已提交
203 204
	if (tlbe_is_writable(gtlbe))
		priv->flags |= E500_TLB_DIRTY;
205 206
}

L
Liu Yu 已提交
207
static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
208
{
L
Liu Yu 已提交
209 210 211 212 213
	if (priv->flags & E500_TLB_VALID) {
		if (priv->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(priv->pfn);
		else
			kvm_release_pfn_clean(priv->pfn);
214

L
Liu Yu 已提交
215 216
		priv->flags = 0;
	}
217 218 219
}

static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
L
Liu Yu 已提交
220
					int esel)
221
{
L
Liu Yu 已提交
222
	mtspr(SPRN_MMUCSR0, MMUCSR0_TLB1FI);
223 224 225 226 227 228 229 230 231
}

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

232
	/* since we only have two TLBs, only lower bit is used. */
233 234 235
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
236
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
237 238

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
L
Liu Yu 已提交
239
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
240 241 242 243 244 245 246 247 248 249 250 251
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

L
Liu Yu 已提交
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_priv *priv,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = priv->pfn;

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}


272
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
L
Liu Yu 已提交
273 274
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
	struct tlbe *stlbe)
275
{
276 277 278 279
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
L
Liu Yu 已提交
280
	struct tlbe_priv *priv;
281

282 283 284
	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
285 286 287 288
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
289
	 */
290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
374 375
	}

L
Liu Yu 已提交
376 377 378 379
	/* Drop old priv and setup new one. */
	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
	kvmppc_e500_priv_release(priv);
	kvmppc_e500_priv_setup(priv, gtlbe, pfn);
380

L
Liu Yu 已提交
381
	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
382 383 384
}

/* XXX only map the one-one case, for now use TLB0 */
L
Liu Yu 已提交
385 386
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
387 388 389
{
	struct tlbe *gtlbe;

L
Liu Yu 已提交
390
	gtlbe = &vcpu_e500->gtlb_arch[0][esel];
391 392 393

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
L
Liu Yu 已提交
394
			gtlbe, 0, esel, stlbe);
395 396 397 398 399 400 401 402

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
L
Liu Yu 已提交
403
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
404 405 406
{
	unsigned int victim;

L
Liu Yu 已提交
407
	victim = vcpu_e500->gtlb_nv[1]++;
408

L
Liu Yu 已提交
409 410
	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->gtlb_nv[1] = 0;
411

L
Liu Yu 已提交
412
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
413 414 415 416 417 418 419 420 421 422

	return victim;
}

/* Invalidate all guest kernel mappings when enter usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (!usermode)
		return;

	_tlbil_all();
}

L
Liu Yu 已提交
427 428 429
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
430
{
L
Liu Yu 已提交
431
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
432 433 434 435 436 437 438 439 440

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

441 442 443 444 445
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
L
Liu Yu 已提交
446
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
447 448
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
L
Liu Yu 已提交
449
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
450 451 452 453 454 455 456
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

457 458 459 460 461 462 463
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

464
	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
465 466 467

	ia = (ea >> 2) & 0x1;

468
	/* since we only have two TLBs, only lower bit is used. */
469 470 471 472
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
L
Liu Yu 已提交
473
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
474 475 476 477 478 479 480 481 482
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

483
	_tlbil_all();
484 485 486 487 488 489 490 491 492 493 494 495 496

	return EMULATE_DONE;
}

/* Emulate tlbre: load the MAS registers from the selected guest entry. */
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	/* Refresh the NV field in MAS0, keep the rest. */
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);

	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

517
	ea = kvmppc_get_gpr(vcpu, rb);
518 519 520 521

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
L
Liu Yu 已提交
522
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
523 524 525 526 527 528
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
L
Liu Yu 已提交
529
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
530 531 532 533 534 535 536
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

537
		/* since we only have two TLBs, only lower bit is used. */
538 539 540 541
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
L
Liu Yu 已提交
542
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
543 544 545 546 547 548 549 550 551
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

552
	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
553 554 555 556 557 558 559
	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
L
Liu Yu 已提交
560
	int tlbsel, esel;
561 562 563 564

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

L
Liu Yu 已提交
565
	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
566

L
Liu Yu 已提交
567 568
	if (get_tlb_v(gtlbe) && tlbsel == 1)
		kvmppc_e500_tlb1_invalidate(vcpu_e500, esel);
569 570 571 572 573 574

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

575 576
	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);
577 578 579

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
L
Liu Yu 已提交
580 581 582 583 584
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

585 586 587 588
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
589
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
590 591

			stlbsel = 0;
L
Liu Yu 已提交
592
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
593 594 595 596 597 598 599 600 601 602 603 604 605 606

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
L
Liu Yu 已提交
607
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
608 609 610 611 612
			break;

		default:
			BUG();
		}
L
Liu Yu 已提交
613
		write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
614 615
	}

616
	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
617 618 619 620 621
	return EMULATE_DONE;
}

/* Find the guest TLB index translating an instruction fetch at @eaddr. */
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	/* Address space for fetches comes from MSR[IS]. */
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

/* Find the guest TLB index translating a data access at @eaddr. */
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	/* Address space for data accesses comes from MSR[DS]. */
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
636
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
637 638 639 640 641 642

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
643
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
644 645 646 647 648 649 650 651 652

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

/*
 * Translate a guest effective address through the guest entry named
 * by @index (a combined tlbsel/esel value), returning the guest
 * physical address.
 */
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	/* Combine the entry's base address with the page offset. */
	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

/* Intentionally empty; teardown is handled by kvmppc_e500_tlb_uninit(). */
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

/*
 * Install a host shadow mapping for @eaddr after a fault, using the
 * guest entry identified by @index (combined tlbsel/esel).
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	switch (tlbsel) {
	case 0:
		/* TLB0 shadows 1:1; reuse the pfn recorded in priv. */
		stlbsel = 0;
		sesel = esel;
		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					priv, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
}

/*
 * Search both guest TLBs for an entry matching @eaddr/@pid/@as.
 * Returns a combined tlbsel/esel index, or -1 if no entry matches.
 */
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		int esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel,
						 pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

S
Scott Wood 已提交
717 718 719 720 721 722 723 724
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
		vcpu->arch.pid = pid;
}

725 726 727 728 729
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
L
Liu Yu 已提交
730
	tlbe = &vcpu_e500->gtlb_arch[1][0];
731
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
732 733 734 735 736
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
L
Liu Yu 已提交
737
	tlbe = &vcpu_e500->gtlb_arch[1][1];
738
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
739 740 741 742 743 744 745 746 747
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

L
Liu Yu 已提交
748 749
	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
750
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
L
Liu Yu 已提交
751
	if (vcpu_e500->gtlb_arch[0] == NULL)
752 753
		goto err_out;

L
Liu Yu 已提交
754 755
	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
756
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
L
Liu Yu 已提交
757 758
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err_out_guest0;
759

L
Liu Yu 已提交
760 761 762
	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[0] == NULL)
763
		goto err_out_guest1;
L
Liu Yu 已提交
764 765 766 767 768
	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);

	if (vcpu_e500->gtlb_priv[1] == NULL)
		goto err_out_priv0;
769

L
Liu Yu 已提交
770 771
	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
L
Liu Yu 已提交
772
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
L
Liu Yu 已提交
773
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
L
Liu Yu 已提交
774
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
L
Liu Yu 已提交
775

776 777
	return 0;

L
Liu Yu 已提交
778 779
err_out_priv0:
	kfree(vcpu_e500->gtlb_priv[0]);
780
err_out_guest1:
L
Liu Yu 已提交
781
	kfree(vcpu_e500->gtlb_arch[1]);
782
err_out_guest0:
L
Liu Yu 已提交
783
	kfree(vcpu_e500->gtlb_arch[0]);
784 785 786 787 788 789
err_out:
	return -1;
}

/*
 * Tear down all guest TLB state: release every page reference held by
 * the priv entries, drop all host shadow mappings, and free the
 * arrays allocated by kvmppc_e500_tlb_init().
 *
 * Fix: the gtlb_priv[] arrays allocated in kvmppc_e500_tlb_init()
 * were never freed here, leaking them on every vcpu teardown.
 */
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel, i;

	/* release all privs */
	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
			struct tlbe_priv *priv =
				&vcpu_e500->gtlb_priv[stlbsel][i];
			kvmppc_e500_priv_release(priv);
		}

	/* discard all guest mapping */
	_tlbil_all();

	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}