/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

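/*
 * Note: CONFIG_MIPS_MT is temporarily undefined around the r4kcache.h
 * include below, presumably so that the plain (non-MT) cache routines are
 * pulled in; the symbol is redefined again immediately afterwards.
 */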
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

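/* Host ASID currently assigned to guest kernel mode on this CPU. */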
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

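/* Host ASID currently assigned to guest user mode on this CPU. */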
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

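/*
 * Despite the name, this returns the host TLB index reserved for the
 * guest commpage mapping; it is written to CP0_Index further below.
 */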
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

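/*
 * Probe the host TLB for an entry matching @entryhi and overwrite it, or
 * write a random slot if there is no match, then flush the mapped pages
 * from the D-cache when requested.  Returns 0 on success, -1 on a bad
 * probe index.
 */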
/* XXXKYMA: Must be called with interrupts disabled */
/* Set flush_dcache_mask == 0 if no dcache flush is required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);

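/*
 * Establish the host TLB mapping for the guest commpage at @badvaddr,
 * using the reserved commpage TLB index and the guest kernel ASID.
 */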
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

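/*
 * Search the guest's software TLB for an entry matching @entryhi on both
 * VPN2 and ASID.  Returns the matching index, or -1 if none is found.
 */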
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

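/*
 * Probe the host TLB for @vaddr under the guest kernel or user ASID,
 * depending on the guest's current mode.  Returns the probe index
 * (negative if there is no matching entry).
 */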
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

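/*
 * Invalidate any host TLB entry mapping @va under the guest user ASID by
 * overwriting it with a unique EntryHi and cleared EntryLo values.
 */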
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

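/*
 * Invalidate the entire host TLB.  When @skip_kseg0 is set, entries that
 * map guest KSEG0 (guest kernel) addresses are preserved.
 */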
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

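/* Flush every TLB entry on the local CPU, preserving CP0_EntryHi. */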
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);