/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
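
/*
 * Illustrative example (with an assumed 16-entry hardware TLB1): shadow
 * TLB1 entries are allocated top-down, so shadow esel 0 maps to hardware
 * entry 15, esel 1 to entry 14, and so on.  The bottom entries stay free
 * for the host's own tlbcam mappings.
 */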

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
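
/*
 * Illustrative usage sketch (not from the original source): how callers in
 * this file obtain a shadow id for a guest (AS, TID, PR) triple and program
 * it into a shadow TLB entry; compare write_stlbe() further down.
 *
 *	preempt_disable();
 *	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
 *				   get_tlb_tid(gtlbe),
 *				   get_cur_pr(&vcpu_e500->vcpu), 0);
 *	stlbe->mas1 |= MAS1_TID(stid);
 *	write_host_tlbe(vcpu_e500, tlbsel, esel, stlbe);
 *	preempt_enable();
 */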

static unsigned int tlb1_entry_num;

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check whether the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on local core */
static inline void local_sid_destroy_all(void)
{
	preempt_disable();
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
	preempt_enable();
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

/* Map guest pid to shadow.
 * We use PID to hold the shadow of the current guest non-zero PID,
 * and PID1 to hold the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
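
/*
 * Worked example (illustrative): if the guest runs with PID=5 in
 * supervisor state, the host PID register is loaded with the shadow id
 * for (AS, TID=5, PR=0) and PID1 with the shadow id for (AS, TID=0, PR=0).
 * Hardware compares a TLB entry's TID against both registers, so shadow
 * entries for guest TID=5 and for the global guest TID=0 both hit
 * without any remapping.
 */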

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return tlb1_entry_num - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
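
/*
 * Worked example (illustrative, assuming the usual e500 MAS3 permission
 * bits SR=0x01, SW=0x04, SX=0x10, UR=0x02, UW=0x08, UX=0x20): a guest
 * supervisor mapping with mas3 = SR|SW (0x05) keeps its user bits
 * cleared, gains UR|UW (0x0a) from the shift, and then ORs in the full
 * supervisor mask, so the shadow entry ends up as SR|UR|SW|UW|SX (0x1f).
 */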

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow tlb entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* The shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
					 int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU, or
		 * if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_size[tlbsel];
	int set_base;
	int i;

	if (tlbsel == 0) {
		int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
		set_base = (eaddr >> PAGE_SHIFT) & mask;
		set_base *= KVM_E500_TLB0_WAY_NUM;
		size = KVM_E500_TLB0_WAY_NUM;
	} else {
		set_base = 0;
	}

	for (i = 0; i < size; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
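
/*
 * Indexing example (illustrative, assuming a 256-entry guest TLB0 with
 * 2 ways): mask = 256/2 - 1 = 127, so a lookup of eaddr scans only the
 * two entries gtlb_arch[0][2*set] and gtlb_arch[0][2*set + 1], where
 * set = (eaddr >> PAGE_SHIFT) & 127.  TLB1 (tlbsel == 1) is fully
 * associative and is always scanned in full.
 */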

static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
					  struct tlbe *gtlbe,
					  pfn_t pfn)
{
	priv->pfn = pfn;
	priv->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		priv->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
	if (priv->flags & E500_TLB_VALID) {
		if (priv->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(priv->pfn);
		else
			kvm_release_pfn_clean(priv->pfn);

		priv->flags = 0;
	}
}

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}
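
/*
 * Decode example (illustrative): with MAS4 = 0x00000100, i.e. TSIZED =
 * BOOK3E_PAGESZ_4K, a miss selects TLB0 (TLBSELD = 0), takes the TID
 * from PID0 (TIDSELD = 0), and seeds a 4KB page size, since
 * tsized = (0x100 >> 7) & 0x1f = 2.
 */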

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_priv *priv,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = priv->pfn;

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}


static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
	struct tlbe *stlbe)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	struct tlbe_priv *priv;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop old priv and setup new one. */
	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
	kvmppc_e500_priv_release(priv);
	kvmppc_e500_priv_setup(priv, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}
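
/*
 * Worked example of the tsize search above (illustrative): suppose the
 * guest asks for a 256KB TLB1 mapping (tsize 8, i.e. 64 4KB pages) of a
 * VM_PFNMAP region, with gfn 0x1040 backed by pfn 0x2040.  gfn and pfn
 * are mutually 64-page aligned (both are multiples of 64), so if the
 * 64-page window also fits inside the vma/memslot overlap, the whole
 * 256KB is covered by one shadow entry; otherwise tsize drops by 2 (one
 * power-of-4 step) and a 16-page (64KB) window is tried next.
 */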

/* XXX only map the one-to-one case; for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, esel, stlbe);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-to-one and one-to-many; for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[1]++;

	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->gtlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}
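
/*
 * Decode example (illustrative): a tlbivax effective address of
 * 0x0000000c has bit 2 (IA) and bit 3 (tlbsel) set, so all guest TLB1
 * entries are invalidated; ea = 0x00001008 instead invalidates only the
 * TLB1 entry (if any) matching EPN 0x00001000 for the current PID.
 */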

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* Since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & MAS6_SAS ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is index into the set, not the whole array */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct tlbe *gtlbe,
			struct tlbe *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (get_tlb_v(gtlbe))
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * is mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
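
/*
 * Example (illustrative): for a 256MB guest entry with raddr 0x10000000,
 * pgmask is 0x0fffffff, so an access at offset 0x00123456 into the page
 * translates to 0x10123456.
 */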

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					priv, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err_out;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err_out_guest0;

	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[0] == NULL)
		goto err_out_guest1;
	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);

	if (vcpu_e500->gtlb_priv[1] == NULL)
		goto err_out_priv0;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err_out_priv1;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err_out_priv1:
	kfree(vcpu_e500->gtlb_priv[1]);
err_out_priv0:
	kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
	kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
	kfree(vcpu_e500->gtlb_arch[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel, i;

	/* release all privs */
	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
			struct tlbe_priv *priv =
				&vcpu_e500->gtlb_priv[stlbsel][i];
			kvmppc_e500_priv_release(priv);
		}

	kvmppc_e500_id_table_free(vcpu_e500);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}