tlbex.c 33.3 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
8
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
9
 * Copyright (C) 2005, 2007  Maciej W. Rozycki
10 11 12 13 14 15 16 17 18 19
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completly out of my mind.
 *
 * They're coming to take me a away haha
 * they're coming to take me a away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
L
Linus Torvalds 已提交
20 21 22 23 24 25 26 27 28 29
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

30 31
#include "uasm.h"

32
static inline int r45k_bvahwbug(void)
L
Linus Torvalds 已提交
33 34 35 36 37
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

38
static inline int r4k_250MHZhwbug(void)
L
Linus Torvalds 已提交
39 40 41 42 43
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

44
static inline int __maybe_unused bcm1250_m3_war(void)
L
Linus Torvalds 已提交
45 46 47 48
{
	return BCM1250_M3_WAR;
}

49
static inline int __maybe_unused r10000_llsc_war(void)
L
Linus Torvalds 已提交
50 51 52 53
{
	return R10000_LLSC_WAR;
}

54 55 56 57 58 59 60 61 62
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch,  plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 * why; it's not an issue caused by the core RTL.
 *
 */
63
static int __cpuinit m4kc_tlbp_war(void)
64 65 66 67 68
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

69
/* Handle labels (which must be positive integers). */
L
Linus Torvalds 已提交
70
enum label_id {
71
	label_second_part = 1,
L
Linus Torvalds 已提交
72
	label_leave,
73 74 75
#ifdef MODULE_START
	label_module_alloc,
#endif
L
Linus Torvalds 已提交
76 77 78 79 80 81 82 83 84 85 86
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

87 88
UASM_L_LA(_second_part)
UASM_L_LA(_leave)
89
#ifdef MODULE_START
90
UASM_L_LA(_module_alloc)
91
#endif
92 93 94 95 96 97 98 99 100
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
101

102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

L
Linus Torvalds 已提交
118 119 120 121 122
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
123 124 125 126 127 128 129 130 131
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
L
Linus Torvalds 已提交
132

133
#ifdef CONFIG_64BIT
134
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
L
Linus Torvalds 已提交
135
#else
136
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
L
Linus Torvalds 已提交
137 138 139 140 141 142 143 144 145 146
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
147
static u32 tlb_handler[128] __cpuinitdata;
L
Linus Torvalds 已提交
148 149

/* simply assume worst case size for labels and relocs */
150 151
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
L
Linus Torvalds 已提交
152 153 154 155

/*
 * The R3000 TLB handler is simple.
 */
156
static void __cpuinit build_r3000_tlb_refill_handler(void)
L
Linus Torvalds 已提交
157 158 159 160 161 162 163
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */
L
Linus Torvalds 已提交
181 182 183 184

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

185 186
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));
L
Linus Torvalds 已提交
187

188
	memcpy((void *)ebase, tlb_handler, 0x80);
189 190

	dump_handler((u32 *)ebase, 32);
L
Linus Torvalds 已提交
191 192 193 194 195 196 197 198 199
}

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
200
static u32 final_handler[64] __cpuinitdata;
L
Linus Torvalds 已提交
201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
224
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
L
Linus Torvalds 已提交
225
{
226
	switch (current_cpu_type()) {
227
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
228
	case CPU_R4600:
229
	case CPU_R4700:
L
Linus Torvalds 已提交
230 231 232
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
233 234
		uasm_i_nop(p);
		uasm_i_tlbp(p);
L
Linus Torvalds 已提交
235 236 237
		break;

	default:
238
		uasm_i_tlbp(p);
L
Linus Torvalds 已提交
239 240 241 242 243 244 245 246 247 248
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceeding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

249
static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
250
					 struct uasm_reloc **r,
L
Linus Torvalds 已提交
251 252 253 254 255
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
256 257
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
L
Linus Torvalds 已提交
258 259
	}

260
	if (cpu_has_mips_r2) {
261
		uasm_i_ehb(p);
262 263 264 265
		tlbw(p);
		return;
	}

266
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
267 268 269 270 271 272 273 274 275 276
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
277
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
L
Linus Torvalds 已提交
278
		tlbw(p);
279 280
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
281 282 283 284 285 286
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
287
		uasm_i_nop(p);
288
		tlbw(p);
289
		uasm_i_nop(p);
290 291 292
		break;

	case CPU_R4300:
L
Linus Torvalds 已提交
293 294 295 296 297 298
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_AU1000:
	case CPU_AU1100:
	case CPU_AU1500:
	case CPU_AU1550:
P
Pete Popov 已提交
299
	case CPU_AU1200:
300 301
	case CPU_AU1210:
	case CPU_AU1250:
302
	case CPU_PR4450:
303
		uasm_i_nop(p);
L
Linus Torvalds 已提交
304 305 306 307 308
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
K
Kumba 已提交
309
	case CPU_R14000:
L
Linus Torvalds 已提交
310
	case CPU_4KC:
311
	case CPU_4KEC:
L
Linus Torvalds 已提交
312
	case CPU_SB1:
A
Andrew Isaacson 已提交
313
	case CPU_SB1A:
L
Linus Torvalds 已提交
314 315 316
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
317 318
	case CPU_BCM3302:
	case CPU_BCM4710:
319
	case CPU_LOONGSON2:
320
	case CPU_CAVIUM_OCTEON:
321
		if (m4kc_tlbp_war())
322
			uasm_i_nop(p);
L
Linus Torvalds 已提交
323 324 325 326
		tlbw(p);
		break;

	case CPU_NEVADA:
327
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
L
Linus Torvalds 已提交
328 329 330 331
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
332
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
L
Linus Torvalds 已提交
333
		tlbw(p);
334
		uasm_l_tlbw_hazard(l, *p);
L
Linus Torvalds 已提交
335 336 337
		break;

	case CPU_RM7000:
338 339 340 341
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
342 343 344 345 346 347 348 349 350 351
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
352 353 354 355
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
L
Linus Torvalds 已提交
356
		tlbw(p);
357 358 359 360
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
L
Linus Torvalds 已提交
361 362 363 364 365 366 367
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
368 369
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
370
		tlbw(p);
371 372
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
373 374 375 376
		break;

	case CPU_VR4131:
	case CPU_VR4133:
377
	case CPU_R5432:
378 379
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
380 381 382 383 384 385 386 387 388 389
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

390
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
391 392 393 394
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
395
static void __cpuinit
396
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
397 398 399 400 401 402 403
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
404
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
405
#ifdef MODULE_START
406
	uasm_il_bltz(p, r, tmp, label_module_alloc);
407
#else
408
	uasm_il_bltz(p, r, tmp, label_vmalloc);
409
#endif
410
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
L
Linus Torvalds 已提交
411 412

#ifdef CONFIG_SMP
413 414 415 416
# ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
417 418
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
419
# else
L
Linus Torvalds 已提交
420
	/*
421
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
L
Linus Torvalds 已提交
422 423
	 * stored in CONTEXT.
	 */
424 425
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
426
#endif
427 428 429 430
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
L
Linus Torvalds 已提交
431
#else
432 433
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
L
Linus Torvalds 已提交
434 435
#endif

436
	uasm_l_vmalloc_done(l, *p);
R
Ralf Baechle 已提交
437 438

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
439
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
R
Ralf Baechle 已提交
440
	else
441 442 443 444 445 446 447 448 449
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
L
Linus Torvalds 已提交
450 451 452 453 454 455
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
456
static void __cpuinit
457
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
458 459 460 461
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

462 463 464
#ifdef MODULE_START
	long modd = (long)module_pg_dir;

465
	uasm_l_module_alloc(l, *p);
466 467 468 469 470
	/*
	 * Assumption:
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START >= 0xe000000000000000UL
	 */
471 472
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);
473

474 475 476
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
477 478
	} else {
		/* unlikely configuration */
479 480
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
481
	}
482
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
483

484 485 486
	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
487
	} else {
488 489 490 491
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
492
		else
493
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
494 495
	}

496 497 498
	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
499
	    MODULE_START << 32 == VMALLOC_START)
500
		uasm_i_dsll32(p, ptr, ptr, 0);	/* typical case */
501
	else
502
		UASM_i_LA(p, ptr, VMALLOC_START);
503
#else
504 505
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
506
#endif
507
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
L
Linus Torvalds 已提交
508

509 510 511
	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
L
Linus Torvalds 已提交
512
	} else {
513 514 515 516
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
517
		else
518
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
L
Linus Torvalds 已提交
519 520 521
	}
}

522
#else /* !CONFIG_64BIT */
L
Linus Torvalds 已提交
523 524 525 526 527

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
528
static void __cpuinit __maybe_unused
L
Linus Torvalds 已提交
529 530 531 532 533 534
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
535 536 537 538
#ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
539 540 541
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
542 543 544 545
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
         */
546 547 548
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
549
#endif
550
	uasm_i_addu(p, ptr, tmp, ptr);
L
Linus Torvalds 已提交
551
#else
552
	UASM_i_LA_mostly(p, ptr, pgdc);
L
Linus Torvalds 已提交
553
#endif
554 555 556 557 558
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
L
Linus Torvalds 已提交
559 560
}

561
#endif /* !CONFIG_64BIT */
L
Linus Torvalds 已提交
562

563
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
L
Linus Torvalds 已提交
564
{
R
Ralf Baechle 已提交
565
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
L
Linus Torvalds 已提交
566 567
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

568
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
585 586
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
L
Linus Torvalds 已提交
587 588
}

589
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
L
Linus Torvalds 已提交
590 591 592 593 594 595 596 597
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
598
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
599
	case CPU_NEVADA:
600
		UASM_i_LW(p, ptr, 0, ptr);
L
Linus Torvalds 已提交
601 602 603 604 605
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
606
		UASM_i_LW(p, ptr, 0, ptr);
L
Linus Torvalds 已提交
607 608 609 610
		break;
	}

	build_adjust_context(p, tmp);
611
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
L
Linus Torvalds 已提交
612 613
}

614
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
L
Linus Torvalds 已提交
615 616 617 618 619 620 621 622
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
623 624 625 626 627 628
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
629 630 631 632 633
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
634 635 636 637
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
638 639
	}
#else
640 641
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
L
Linus Torvalds 已提交
642 643
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
644
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
L
Linus Torvalds 已提交
645
	if (r4k_250MHZhwbug())
646 647 648
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
L
Linus Torvalds 已提交
649
	if (r45k_bvahwbug())
650
		uasm_i_mfc0(p, tmp, C0_INDEX);
L
Linus Torvalds 已提交
651
	if (r4k_250MHZhwbug())
652 653
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
654 655 656
#endif
}

657
static void __cpuinit build_r4000_tlb_refill_handler(void)
L
Linus Torvalds 已提交
658 659
{
	u32 *p = tlb_handler;
660 661
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
662 663 664 665 666 667 668 669 670 671 672 673
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
674 675 676 677 678 679
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
L
Linus Torvalds 已提交
680 681
	}

682
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
683 684 685 686 687 688 689 690
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
691 692
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */
L
Linus Torvalds 已提交
693

694
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
695 696 697 698 699 700 701
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
M
Matt LaPlante 已提交
702
	 * need three, with the second nop'ed and the third being
L
Linus Torvalds 已提交
703 704
	 * unused.
	 */
705 706
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
L
Linus Torvalds 已提交
707 708 709 710 711
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
712
		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
L
Linus Torvalds 已提交
713 714 715 716 717 718
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
719
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
L
Linus Torvalds 已提交
720 721
	f = final_handler;
	/* Simplest case, just copy the handler. */
722
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
L
Linus Torvalds 已提交
723
	final_len = p - tlb_handler;
724
#else /* CONFIG_64BIT */
L
Linus Torvalds 已提交
725 726 727
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
728
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
L
Linus Torvalds 已提交
729 730 731 732 733 734 735
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
736
		if (uasm_insn_has_bdelay(relocs, split - 1))
L
Linus Torvalds 已提交
737 738 739
			split--;

		/* Copy first part of the handler. */
740
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
L
Linus Torvalds 已提交
741 742 743
		f += split - tlb_handler;

		/* Insert branch. */
744 745 746 747
		uasm_l_split(&l, final_handler);
		uasm_il_b(&f, &r, label_split);
		if (uasm_insn_has_bdelay(relocs, split))
			uasm_i_nop(&f);
L
Linus Torvalds 已提交
748
		else {
749 750
			uasm_copy_handler(relocs, labels, split, split + 1, f);
			uasm_move_labels(labels, f, f + 1, -1);
L
Linus Torvalds 已提交
751 752 753 754 755
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
756
		uasm_copy_handler(relocs, labels, split, p, final_handler);
L
Linus Torvalds 已提交
757 758
		final_len = (f - (final_handler + 32)) + (p - split);
	}
759
#endif /* CONFIG_64BIT */
L
Linus Torvalds 已提交
760

761 762 763
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);
L
Linus Torvalds 已提交
764

765
	memcpy((void *)ebase, final_handler, 0x100);
766 767

	dump_handler((u32 *)ebase, 64);
L
Linus Torvalds 已提交
768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

785 786 787
u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
L
Linus Torvalds 已提交
788

789
static void __cpuinit
790
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
L
Linus Torvalds 已提交
791 792 793 794
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
795
		uasm_i_lld(p, pte, 0, ptr);
L
Linus Torvalds 已提交
796 797
	else
# endif
798
		UASM_i_LL(p, pte, 0, ptr);
L
Linus Torvalds 已提交
799 800 801
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
802
		uasm_i_ld(p, pte, 0, ptr);
L
Linus Torvalds 已提交
803 804
	else
# endif
805
		UASM_i_LW(p, pte, 0, ptr);
L
Linus Torvalds 已提交
806 807 808
#endif
}

809
static void __cpuinit
810
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
811
	unsigned int mode)
L
Linus Torvalds 已提交
812
{
813 814 815 816
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

817
	uasm_i_ori(p, pte, pte, mode);
L
Linus Torvalds 已提交
818 819 820
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
821
		uasm_i_scd(p, pte, 0, ptr);
L
Linus Torvalds 已提交
822 823
	else
# endif
824
		UASM_i_SC(p, pte, 0, ptr);
L
Linus Torvalds 已提交
825 826

	if (r10000_llsc_war())
827
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
L
Linus Torvalds 已提交
828
	else
829
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
L
Linus Torvalds 已提交
830 831 832

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
833 834 835 836 837 838 839
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
L
Linus Torvalds 已提交
840
	} else
841
		uasm_i_nop(p);
L
Linus Torvalds 已提交
842
# else
843
	uasm_i_nop(p);
L
Linus Torvalds 已提交
844 845 846 847
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
848
		uasm_i_sd(p, pte, 0, ptr);
L
Linus Torvalds 已提交
849 850
	else
# endif
851
		UASM_i_SW(p, pte, 0, ptr);
L
Linus Torvalds 已提交
852 853 854

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
855 856 857 858
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
L
Linus Torvalds 已提交
859 860 861 862 863 864 865 866 867 868
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with it's original value.
 */
869
static void __cpuinit
870
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
871 872
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
873 874 875
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
876
	iPTE_LW(p, l, pte, ptr);
L
Linus Torvalds 已提交
877 878 879
}

/* Make PTE valid, store result in PTR. */
880
static void __cpuinit
881
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
L
Linus Torvalds 已提交
882 883
		 unsigned int ptr)
{
884 885 886
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
L
Linus Torvalds 已提交
887 888 889 890 891 892
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
893
static void __cpuinit
894
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
895 896
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
897 898 899
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
900
	iPTE_LW(p, l, pte, ptr);
L
Linus Torvalds 已提交
901 902 903 904 905
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
906
static void __cpuinit
907
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
L
Linus Torvalds 已提交
908 909
		 unsigned int ptr)
{
910 911 912 913
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
L
Linus Torvalds 已提交
914 915 916 917 918 919
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
920
static void __cpuinit
921
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
922 923
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
924 925
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
926
	iPTE_LW(p, l, pte, ptr);
L
Linus Torvalds 已提交
927 928 929 930 931 932
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

933 934 935 936
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
937
static void __cpuinit
938
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
L
Linus Torvalds 已提交
939
{
940 941 942 943 944
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
L
Linus Torvalds 已提交
945 946 947
}

/*
948 949 950 951
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
L
Linus Torvalds 已提交
952
 */
953
static void __cpuinit
954 955 956 957 958 959 960 961 962 963 964 965 966 967 968
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
L
Linus Torvalds 已提交
969 970
}

971
static void __cpuinit
L
Linus Torvalds 已提交
972 973 974 975 976
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

977 978 979 980 981 982 983 984 985 986 987 988
	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
L
Linus Torvalds 已提交
989 990
}

991
static void __cpuinit build_r3000_tlb_load_handler(void)
L
Linus Torvalds 已提交
992 993
{
	u32 *p = handle_tlbl;
994 995
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
996 997 998 999 1000 1001 1002

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
1003
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1004
	build_make_valid(&p, &r, K0, K1);
1005
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
L
Linus Torvalds 已提交
1006

1007 1008 1009
	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1010 1011 1012 1013

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

1014 1015 1016
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));
L
Linus Torvalds 已提交
1017

1018
	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
L
Linus Torvalds 已提交
1019 1020
}

1021
static void __cpuinit build_r3000_tlb_store_handler(void)
L
Linus Torvalds 已提交
1022 1023
{
	u32 *p = handle_tlbs;
1024 1025
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1026 1027 1028 1029 1030 1031 1032

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
1033
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1034
	build_make_write(&p, &r, K0, K1);
1035
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
L
Linus Torvalds 已提交
1036

1037 1038 1039
	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1040 1041 1042 1043

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

1044 1045 1046
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));
L
Linus Torvalds 已提交
1047

1048
	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
L
Linus Torvalds 已提交
1049 1050
}

1051
static void __cpuinit build_r3000_tlb_modify_handler(void)
L
Linus Torvalds 已提交
1052 1053
{
	u32 *p = handle_tlbm;
1054 1055
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1056 1057 1058 1059 1060 1061 1062

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
1063
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1064
	build_make_write(&p, &r, K0, K1);
1065
	build_r3000_pte_reload_tlbwi(&p, K0, K1);
L
Linus Torvalds 已提交
1066

1067 1068 1069
	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1070 1071 1072 1073

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

1074 1075 1076
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));
L
Linus Torvalds 已提交
1077

1078
	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
L
Linus Torvalds 已提交
1079 1080 1081 1082 1083
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
1084
static void __cpuinit
1085 1086
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
L
Linus Torvalds 已提交
1087 1088
				   unsigned int ptr)
{
1089
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
1090 1091 1092 1093 1094
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

1095 1096 1097 1098 1099
	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);
L
Linus Torvalds 已提交
1100 1101

#ifdef CONFIG_SMP
1102 1103
	uasm_l_smp_pgtable_change(l, *p);
#endif
1104
	iPTE_LW(p, l, pte, ptr); /* get even pte */
1105 1106
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
L
Linus Torvalds 已提交
1107 1108
}

1109
static void __cpuinit
1110 1111
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
L
Linus Torvalds 已提交
1112 1113
				   unsigned int ptr)
{
1114 1115
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
L
Linus Torvalds 已提交
1116 1117
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
1118 1119
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */
L
Linus Torvalds 已提交
1120

1121
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
1122 1123 1124 1125
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

1126
static void __cpuinit build_r4000_tlb_load_handler(void)
L
Linus Torvalds 已提交
1127 1128
{
	u32 *p = handle_tlbl;
1129 1130
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1131 1132 1133 1134 1135 1136

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
1137 1138 1139 1140 1141 1142
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
L
Linus Torvalds 已提交
1143 1144 1145 1146
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
1147 1148
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
L
Linus Torvalds 已提交
1149 1150 1151
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

1152 1153 1154
	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1155 1156 1157 1158

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

1159 1160 1161
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));
L
Linus Torvalds 已提交
1162

1163
	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
L
Linus Torvalds 已提交
1164 1165
}

1166
static void __cpuinit build_r4000_tlb_store_handler(void)
L
Linus Torvalds 已提交
1167 1168
{
	u32 *p = handle_tlbs;
1169 1170
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1171 1172 1173 1174 1175 1176 1177

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
1178 1179
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
L
Linus Torvalds 已提交
1180 1181 1182
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

1183 1184 1185
	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1186 1187 1188 1189

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

1190 1191 1192
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));
L
Linus Torvalds 已提交
1193

1194
	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
L
Linus Torvalds 已提交
1195 1196
}

1197
static void __cpuinit build_r4000_tlb_modify_handler(void)
L
Linus Torvalds 已提交
1198 1199
{
	u32 *p = handle_tlbm;
1200 1201
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1202 1203 1204 1205 1206 1207 1208

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
1209 1210
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
L
Linus Torvalds 已提交
1211 1212 1213 1214
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

1215 1216 1217
	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1218 1219 1220 1221

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

1222 1223 1224
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));
1225

1226
	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
L
Linus Torvalds 已提交
1227 1228
}

1229
void __cpuinit build_tlb_refill_handler(void)
L
Linus Torvalds 已提交
1230 1231 1232 1233 1234 1235 1236 1237
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

1238
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
1274

1275
void __cpuinit flush_tlb_handlers(void)
1276
{
1277
	local_flush_icache_range((unsigned long)handle_tlbl,
1278
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
1279
	local_flush_icache_range((unsigned long)handle_tlbs,
1280
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
1281
	local_flush_icache_range((unsigned long)handle_tlbm,
1282 1283
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}