/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

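/*
 * Guest TLB0 is set-associative; victim selection within a set is a
 * simple round-robin counter (gtlb_nv[0]) that wraps at the number of
 * ways, mirroring the next-victim (NV) hint a real MMU would provide.
 */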
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

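/*
 * Compute the array index of the first entry of the TLB0 set that an
 * address falls into.  The guest TLB0 array is laid out set-major, so
 * entries [set * ways, set * ways + ways) form one set.  For instance,
 * with a hypothetical 4-way TLB0 of 128 sets, EA 0x10003000 (EPN
 * 0x10003) selects set 3 and tlb0_set_base() returns 12.
 */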
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

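/*
 * Decode the ESEL field of MAS0 into an index into the guest TLB
 * array: for the set-associative TLB0, ESEL selects a way within the
 * set derived from the EPN in MAS2; for the fully associative TLB1 it
 * indexes the entry directly.
 */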
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}

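/*
 * Match rules for the search below: a TID of 0 marks a global mapping
 * that matches any PID, and as == -1 is a wildcard matching either
 * address space.
 */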
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
				eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

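/*
 * Load the MAS registers with the state a real TLB miss would leave
 * behind: MAS4 supplies the default TLB selector and page size, MAS0
 * suggests a victim entry, and MAS1/MAS2 are primed so the guest
 * normally only has to fill in the RPN and permission bits before
 * issuing tlbwe.
 */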
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

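/*
 * TLB1 is fully associative and can map arbitrary ranges, so the
 * lookup path keeps a cached [tlb1_min_eaddr, tlb1_max_eaddr] window
 * spanning all valid TLB1 entries.  Lookups outside the window fail
 * fast; the helpers below recompute or widen the window as entries
 * are written and invalidated.
 */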
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
				min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
				max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}

static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
				struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
			vcpu_e500->tlb1_max_eaddr == end;
}

/* This function is supposed to be called when adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
				struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

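/*
 * Clear a guest TLB entry, unless it is IPROT-protected;
 * invalidation-protected entries survive invalidate operations, as on
 * real hardware.  Returns -1 if the entry was protected.
 */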
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}

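/*
 * Emulate a write to MMUCSR0: the TLB0FI/TLB1FI bits flash-invalidate
 * the corresponding guest TLB (sparing IPROT entries), after which all
 * host shadow mappings are dropped.
 */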
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

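/*
 * Emulate tlbivax.  The effective address encodes the operation: bit 2
 * requests invalidate-all and bit 3 selects the TLB; otherwise the
 * entry matching the 4K-aligned EA and current PID is invalidated.
 */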
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

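/*
 * Helpers for tlbilx emulation: tlbilx_all() invalidates every entry
 * of one guest TLB, optionally filtered by PID, while tlbilx_one()
 * below invalidates the single entry matching an effective address.
 */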
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}

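/*
 * Emulate tlbilx: type 0 invalidates everything, type 1 invalidates
 * all entries owned by the search PID, and type 3 invalidates the
 * single entry matching the given EA.
 */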
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}

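/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0 back into
 * MAS1/MAS2/MAS7_3, refreshing the next-victim hint in MAS0.
 */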
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}

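/*
 * Emulate tlbsx: search both guest TLBs for an entry matching the EA
 * and the PID/AS given in MAS6.  On a hit the MAS registers describe
 * the entry found; on a miss they are preloaded with values suitable
 * for writing a new entry, as the hardware search instruction does.
 */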
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* Since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

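/*
 * Emulate tlbwe: commit the guest's MAS registers to the TLB entry
 * selected by MAS0, invalidating any host shadow mappings of the entry
 * being overwritten, and premap the new translation when it is safe to
 * do so.
 */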
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;
	int recal = 0;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
	                              gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid TLB1 entry was overwritten, recalculate the
		 * min/max TLB1 map address range; otherwise just widen the
		 * range for the entry being added.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr = get_tlb_eaddr(gtlbe);
		u64 raddr = get_tlb_raddr(gtlbe);

		if (tlbsel == 0) {
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
		}

		/* Premap the faulting page */
		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}


int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

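/*
 * Translate an EA through an already-found guest TLB entry by merging
 * the entry's real page number with the in-page offset bits.
 */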
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

/*****************************************/

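/*
 * Tear down the guest TLB arrays.  If the arrays live in pages shared
 * with userspace (KVM_CONFIG_TLB), unmap and release those pages,
 * marking them dirty since the guest may have written TLB entries.
 */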
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
	kfree(vcpu_e500->g2h_tlb1_map);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}

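/*
 * Handler for the KVM_CONFIG_TLB ioctl: validate the geometry supplied
 * by userspace, pin and vmap the user pages backing the TLB entry
 * array so kernel and userspace share it without copying, then swap
 * the new configuration in for the old one.  Note that TLB1 must be
 * described as fully associative here (ways == entries).
 */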
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_privs;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
	                     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_privs;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err_privs:
	kfree(privs[0]);
	kfree(privs[1]);

err_put_page:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}

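/*
 * Handler for the KVM_DIRTY_TLB ioctl: userspace has modified the
 * shared TLB array directly, so recompute the TLB1 range cache and
 * drop all host shadow mappings.
 */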
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_recalc_tlb1map_range(vcpu_e500);
	kvmppc_core_flush_tlb(vcpu);
	return 0;
}

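/*
 * Set up the default guest TLB geometry, used until userspace
 * reconfigures it via KVM_CONFIG_TLB, and derive the guest-visible
 * TLBnCFG values from the host's registers with the entry count and
 * associativity fields replaced by the guest geometry.
 */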
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	if (e500_mmu_host_init(vcpu_e500))
		goto err;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	/* Init TLB configuration register */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
	vcpu->arch.tlbcfg[0] |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
	vcpu->arch.tlbcfg[1] |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	e500_mmu_host_uninit(vcpu_e500);
}