/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

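/*
 * Return the host ASID assigned to the guest kernel address space for the
 * current CPU.
 */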
u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

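/*
 * Return the host ASID assigned to the guest user address space for the
 * current CPU.
 */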
u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

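/*
 * Despite its name, this returns the host TLB index reserved for the guest
 * commpage mapping, for use with write_c0_index().
 */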
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

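/*
 * Ensure the host pfn backing guest physical frame @gfn has been looked up
 * and cached in kvm->arch.guest_pmap[].  Returns 0 on success or -EFAULT if
 * the pfn could not be obtained.
 */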
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

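/*
 * Install a host TLB entry mapping the guest commpage at @badvaddr, using the
 * TLB index reserved for the commpage.
 */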
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

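/*
 * Fault in the host pages backing a guest TLB entry and write a matching pair
 * of entries into the host TLB.  The resolved host physical addresses are
 * optionally returned via @hpa0 and @hpa1.
 */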
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

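/*
 * Search the guest TLB for an entry matching @entryhi (VPN2 and ASID).
 * Returns the matching index, or -1 if nothing matches.
 */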
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

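/*
 * Probe the host TLB for @vaddr using the guest's current kernel or user
 * ASID.  Returns the probed index; a negative value means no match.
 */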
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else {
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	}

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

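/*
 * Invalidate any host TLB entry mapping @va under the guest user ASID.
 */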
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx > 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx > 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

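/*
 * Invalidate the host TLB.  If @skip_kseg0 is set, entries mapping guest
 * KSEG0 (guest kernel) addresses are preserved.
 */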
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

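/*
 * Allocate a fresh ASID for @mm on @cpu, flushing the local TLB (and the
 * I-cache if it is virtually tagged) whenever the ASID space wraps and a new
 * ASID cycle begins.
 */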
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

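/* Invalidate every entry in the local host TLB. */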
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);

}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

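/*
 * Fetch the guest instruction at @opc, faulting the mapping into the host TLB
 * from the guest TLB or guest KSEG0 if necessary.  Returns KVM_INVALID_INST
 * on failure.
 */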
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);