/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

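/*
 * r4kcache.h is included with CONFIG_MIPS_MT temporarily undefined so that
 * the plain (non-MT) cache operations are used here; the define is restored
 * immediately afterwards.
 */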
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

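/**
 * kvm_mips_dump_host_tlbs() - Dump the contents of the hardware TLB.
 *
 * Read each entry out of the host TLB and log it via kvm_info(), flagging
 * invalid entries with a '*'. EntryHi and PageMask are preserved across the
 * dump, and the caller's IRQ state is restored on return.
 */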
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);

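/**
 * kvm_mips_dump_guest_tlbs() - Dump the VCPU's guest TLB array.
 * @vcpu:	Virtual CPU whose software-managed guest TLB is printed.
 *
 * Log each of the KVM_MIPS_GUEST_TLB_SIZE guest TLB entries via kvm_info(),
 * in the same format used by kvm_mips_dump_host_tlbs().
 */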
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);

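/**
 * kvm_mips_map_page() - Fault a guest physical page into the guest pmap.
 * @kvm:	KVM instance.
 * @gfn:	Guest frame number to map.
 *
 * Translate @gfn to a host pfn and cache it in kvm->arch.guest_pmap, unless
 * a mapping is already present.
 *
 * Return: 0 on success, -EFAULT if no host page could be found.
 */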
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
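/**
 * kvm_mips_host_tlb_write() - Write an entry into the host TLB.
 * @vcpu:	Virtual CPU on whose behalf the entry is written.
 * @entryhi:	Value for CP0_EntryHi (VPN2 and ASID).
 * @entrylo0:	Value for CP0_EntryLo0 (even page).
 * @entrylo1:	Value for CP0_EntryLo1 (odd page).
 * @flush_dcache_mask:	Page mask for the D-cache flush, 0 to skip flushing.
 *
 * Probe for @entryhi and overwrite the matching entry, or write a random
 * entry if there is no match. Valid pages are flushed from the D-cache when
 * @flush_dcache_mask is non-zero.
 *
 * Return: 0 on success, -1 if the probed index is out of range.
 */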
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

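/**
 * kvm_mips_handle_kseg0_tlb_fault() - Map in a guest KSEG0 page pair.
 * @badvaddr:	Faulting guest virtual address (must lie in guest KSEG0).
 * @vcpu:	Virtual CPU that took the fault.
 *
 * Map the even/odd guest page pair containing @badvaddr and write a
 * corresponding entry into the host TLB under the guest kernel ASID.
 *
 * Return: 0 on success, -1 on a bad address or mapping failure.
 */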
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
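	/* CCA = 3 (cacheable), D = 1 (writable), V = 1 (valid), G = 0 */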
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);

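/**
 * kvm_mips_handle_commpage_tlb_fault() - Map the commpage into the host TLB.
 * @badvaddr:	Faulting guest virtual address.
 * @vcpu:	Virtual CPU that took the fault.
 *
 * Write the VCPU's KSEG0 commpage mapping into the reserved commpage TLB
 * index, with only the even (EntryLo0) half marked valid.
 *
 * Return: 0.
 */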
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
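	/* CCA = 3 (cacheable), D = 1 (writable), V = 1 (valid), G = 0 */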
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);

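/**
 * kvm_mips_handle_mapped_seg_tlb_fault() - Handle a fault via a guest TLB entry.
 * @vcpu:	Virtual CPU that took the fault.
 * @tlb:	Matching guest TLB entry.
 * @hpa0:	Optional output for the even page's host physical address.
 * @hpa1:	Optional output for the odd page's host physical address.
 *
 * Map the pair of guest physical pages referenced by @tlb and write a
 * corresponding entry into the host TLB, taking the D and V attributes from
 * the guest entry and the ASID from the current guest mode.
 *
 * Return: 0 on success, -1 if a page could not be mapped.
 */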
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);

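/**
 * kvm_mips_guest_tlb_lookup() - Search the guest TLB for a matching entry.
 * @vcpu:	Virtual CPU whose guest TLB is searched.
 * @entryhi:	EntryHi value (VPN2 and ASID) to match against.
 *
 * Return: Index of the first matching guest TLB entry, or -1 if none match.
 */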
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);

	return index;
}
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);

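/**
 * kvm_mips_host_tlb_lookup() - Probe the host TLB for a guest address.
 * @vcpu:	Virtual CPU, used to pick the kernel or user guest ASID.
 * @vaddr:	Guest virtual address to probe for.
 *
 * Return: Host TLB index of the matching entry, or a negative value if the
 * probe missed (the P bit is set in CP0_Index).
 */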
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);

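/**
 * kvm_mips_host_tlb_inv() - Invalidate a host TLB entry for a guest address.
 * @vcpu:	Virtual CPU, used to pick the guest user ASID.
 * @va:		Guest virtual address whose mapping should be invalidated.
 *
 * Probe for @va with the guest user ASID and, if an entry is found,
 * overwrite it with a unique (unused) EntryHi value and zero EntryLo.
 *
 * Return: 0.
 */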
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* A failed probe sets the P bit, making idx negative; 0 is valid */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);

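/**
 * kvm_mips_flush_host_tlb() - Flush entries from the host TLB.
 * @skip_kseg0:	If non-zero, leave guest KSEG0 (guest kernel) entries alone.
 *
 * Invalidate every host TLB entry by rewriting it with a unique EntryHi and
 * zero EntryLo values, optionally preserving entries whose EntryHi lies in
 * guest KSEG0.
 */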
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);

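/**
 * kvm_get_new_mmu_context() - Allocate a fresh guest ASID.
 * @mm:		Guest kernel or guest user mm to receive the new ASID.
 * @cpu:	CPU on which the ASID is allocated.
 * @vcpu:	Virtual CPU (unused here).
 *
 * KVM variant of get_new_mmu_context(): bump the CPU's ASID cache and, on
 * rollover, flush the I-cache (for VTag caches) and the whole local TLB to
 * start a new ASID cycle.
 */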
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

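/**
 * kvm_local_flush_tlb_all() - Flush the entire local TLB.
 *
 * Invalidate every entry in this CPU's TLB by writing a unique EntryHi and
 * zero EntryLo values at each index, with interrupts disabled.
 */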
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
							ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid; set it to what it should be based on the
		 * mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);

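/**
 * kvm_get_inst() - Fetch a guest instruction by guest virtual address.
 * @opc:	Guest PC of the instruction.
 * @vcpu:	Virtual CPU that executed the instruction.
 *
 * Read the instruction word at @opc, first faulting a mapping into the host
 * TLB if @opc lies in a mapped guest segment, or translating through the
 * guest pmap if it lies in guest KSEG0.
 *
 * Return: The instruction word, or KVM_INVALID_INST if it is unreachable.
 */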
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_get_inst);