tlbex.c 54.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
8
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
9
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
10
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
D
David Daney 已提交
11
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
12 13 14 15 16 17 18 19 20
 *
 * ... and the days got worse and worse and now you see
 * I've gone completly out of my mind.
 *
 * They're coming to take me a away haha
 * they're coming to take me a away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
L
Linus Torvalds 已提交
21 22
 */

23
#include <linux/bug.h>
L
Linus Torvalds 已提交
24 25
#include <linux/kernel.h>
#include <linux/types.h>
26
#include <linux/smp.h>
L
Linus Torvalds 已提交
27 28
#include <linux/string.h>
#include <linux/init.h>
29
#include <linux/cache.h>
L
Linus Torvalds 已提交
30

31 32
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
L
Linus Torvalds 已提交
33
#include <asm/war.h>
34
#include <asm/uasm.h>
35

36 37 38 39 40 41 42 43 44 45
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);


46
static inline int r45k_bvahwbug(void)
L
Linus Torvalds 已提交
47 48 49 50 51
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

52
static inline int r4k_250MHZhwbug(void)
L
Linus Torvalds 已提交
53 54 55 56 57
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

58
static inline int __maybe_unused bcm1250_m3_war(void)
L
Linus Torvalds 已提交
59 60 61 62
{
	return BCM1250_M3_WAR;
}

63
static inline int __maybe_unused r10000_llsc_war(void)
L
Linus Torvalds 已提交
64 65 66 67
{
	return R10000_LLSC_WAR;
}

68 69 70 71 72 73 74 75 76 77 78 79
static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111
static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
112 113
	/* Really unreachable, but evidently some GCC want this. */
	return 0;
114 115
}
#endif
116 117 118 119 120 121 122 123 124
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch,  plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 * why; it's not an issue caused by the core RTL.
 *
 */
125
static int __cpuinit m4kc_tlbp_war(void)
126 127 128 129 130
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

131
/* Handle labels (which must be positive integers). */
L
Linus Torvalds 已提交
132
enum label_id {
133
	label_second_part = 1,
L
Linus Torvalds 已提交
134 135 136 137 138
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
139 140
	label_tlbl_goaround1,
	label_tlbl_goaround2,
L
Linus Torvalds 已提交
141 142 143 144 145
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
146
	label_large_segbits_fault,
D
David Daney 已提交
147 148 149
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
L
Linus Torvalds 已提交
150 151
};

152 153 154 155 156 157
UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
158 159
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
160 161 162 163 164
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
165
UASM_L_LA(_large_segbits_fault)
D
David Daney 已提交
166 167 168
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
169

170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

L
Linus Torvalds 已提交
186 187 188 189 190
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
191 192 193 194 195
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
D
David Daney 已提交
196
#define C0_PAGEMASK	5, 0
197 198 199 200
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
L
Linus Torvalds 已提交
201

202
#ifdef CONFIG_64BIT
203
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
L
Linus Torvalds 已提交
204
#else
205
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
L
Linus Torvalds 已提交
206 207 208 209 210 211 212 213 214 215
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
216
static u32 tlb_handler[128] __cpuinitdata;
L
Linus Torvalds 已提交
217 218

/* simply assume worst case size for labels and relocs */
219 220
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
L
Linus Torvalds 已提交
221

222 223 224 225
#ifdef CONFIG_64BIT
static int check_for_high_segbits __cpuinitdata;
#endif

226
static int check_for_high_segbits __cpuinitdata;
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246

static unsigned int kscratch_used_mask __cpuinitdata;

static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}

247
static int scratch_reg __cpuinitdata;
248
static int pgd_reg __cpuinitdata;
249 250 251
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
252

253 254 255
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * we cannot do r3000 under these circumstances.
256 257 258
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd
259
 */
260
extern unsigned long pgd_current[];
261

L
Linus Torvalds 已提交
262 263 264
/*
 * The R3000 TLB handler is simple.
 */
265
static void __cpuinit build_r3000_tlb_refill_handler(void)
L
Linus Torvalds 已提交
266 267 268 269 270 271 272
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289
	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */
L
Linus Torvalds 已提交
290 291 292 293

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

294 295
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));
L
Linus Torvalds 已提交
296

297
	memcpy((void *)ebase, tlb_handler, 0x80);
298 299

	dump_handler((u32 *)ebase, 32);
L
Linus Torvalds 已提交
300
}
301
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
L
Linus Torvalds 已提交
302 303 304 305 306 307 308 309

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
310
static u32 final_handler[64] __cpuinitdata;
L
Linus Torvalds 已提交
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
334
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
L
Linus Torvalds 已提交
335
{
336
	switch (current_cpu_type()) {
337
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
338
	case CPU_R4600:
339
	case CPU_R4700:
L
Linus Torvalds 已提交
340 341 342
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
343 344
		uasm_i_nop(p);
		uasm_i_tlbp(p);
L
Linus Torvalds 已提交
345 346 347
		break;

	default:
348
		uasm_i_tlbp(p);
L
Linus Torvalds 已提交
349 350 351 352 353 354
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
L
Lucas De Marchi 已提交
355
 * the preceding mtc0 and for the following eret.
L
Linus Torvalds 已提交
356 357 358
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

359
static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
360
					 struct uasm_reloc **r,
L
Linus Torvalds 已提交
361 362 363 364 365
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
366 367
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
L
Linus Torvalds 已提交
368 369
	}

370
	if (cpu_has_mips_r2) {
371 372
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
373 374 375 376
		tlbw(p);
		return;
	}

377
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
378 379 380 381 382 383 384 385 386 387
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
388
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
L
Linus Torvalds 已提交
389
		tlbw(p);
390 391
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
392 393 394 395 396 397
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
398
		uasm_i_nop(p);
399
		tlbw(p);
400
		uasm_i_nop(p);
401 402 403
		break;

	case CPU_R4300:
L
Linus Torvalds 已提交
404 405
	case CPU_5KC:
	case CPU_TX49XX:
406
	case CPU_PR4450:
407
		uasm_i_nop(p);
L
Linus Torvalds 已提交
408 409 410 411 412
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
K
Kumba 已提交
413
	case CPU_R14000:
L
Linus Torvalds 已提交
414
	case CPU_4KC:
415
	case CPU_4KEC:
L
Linus Torvalds 已提交
416
	case CPU_SB1:
A
Andrew Isaacson 已提交
417
	case CPU_SB1A:
L
Linus Torvalds 已提交
418 419 420
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
421 422 423 424 425
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
426
	case CPU_LOONGSON2:
427
	case CPU_R5500:
428
		if (m4kc_tlbp_war())
429
			uasm_i_nop(p);
430
	case CPU_ALCHEMY:
L
Linus Torvalds 已提交
431 432 433 434
		tlbw(p);
		break;

	case CPU_NEVADA:
435
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
L
Linus Torvalds 已提交
436 437 438 439
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
440
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
L
Linus Torvalds 已提交
441
		tlbw(p);
442
		uasm_l_tlbw_hazard(l, *p);
L
Linus Torvalds 已提交
443 444 445
		break;

	case CPU_RM7000:
446 447 448 449
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
450 451 452 453 454 455 456 457 458 459
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
460 461 462 463
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
L
Linus Torvalds 已提交
464
		tlbw(p);
465 466 467 468
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
L
Linus Torvalds 已提交
469 470 471 472 473 474 475
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
476 477
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
478
		tlbw(p);
479 480
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
481 482 483 484
		break;

	case CPU_VR4131:
	case CPU_VR4133:
485
	case CPU_R5432:
486 487
		uasm_i_nop(p);
		uasm_i_nop(p);
L
Linus Torvalds 已提交
488 489 490
		tlbw(p);
		break;

491 492 493 494 495
	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

L
Linus Torvalds 已提交
496 497 498 499 500 501 502
	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

503 504
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
D
David Daney 已提交
505
{
506 507 508 509 510
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
511
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
512 513 514 515 516
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}
D
David Daney 已提交
517

518
#ifdef CONFIG_HUGETLB_PAGE
D
David Daney 已提交
519

520 521 522
static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
523 524
					     enum label_id lid,
					     int restore_scratch)
525
{
526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
D
David Daney 已提交
545
	} else {
546 547 548 549 550 551 552 553 554 555 556 557 558 559
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
D
David Daney 已提交
560 561 562
	}
}

563 564 565 566
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
567 568
						 enum tlb_write_entry wmode,
						 int restore_scratch)
569 570 571 572 573 574 575 576
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

577
	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
578 579
}

D
David Daney 已提交
580 581 582 583 584 585 586 587
/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
588 589 590 591 592 593
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
D
David Daney 已提交
594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

617
	build_convert_pte_to_entrylo(p, pte);
618
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
D
David Daney 已提交
619 620 621 622 623 624
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

625
	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
D
David Daney 已提交
626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
642
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
D
David Daney 已提交
643 644 645
}
#endif /* CONFIG_HUGETLB_PAGE */

646
#ifdef CONFIG_64BIT
L
Linus Torvalds 已提交
647 648 649 650
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
651
static void __cpuinit
652
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
L
Linus Torvalds 已提交
653 654
		 unsigned int tmp, unsigned int ptr)
{
655
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
L
Linus Torvalds 已提交
656
	long pgdc = (long)pgd_current;
657
#endif
L
Linus Torvalds 已提交
658 659 660
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
661
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitely assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
680
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
L
Linus Torvalds 已提交
681

682
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0  1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}
699
#elif defined(CONFIG_SMP)
700 701 702 703
# ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
704
	uasm_i_mfc0(p, ptr, C0_TCBIND);
705
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
706
# else
L
Linus Torvalds 已提交
707
	/*
708
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
L
Linus Torvalds 已提交
709 710
	 * stored in CONTEXT.
	 */
711
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
712
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
713
# endif
714 715 716 717
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
L
Linus Torvalds 已提交
718
#else
719 720
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
L
Linus Torvalds 已提交
721 722
#endif

723
	uasm_l_vmalloc_done(l, *p);
R
Ralf Baechle 已提交
724

725 726
	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
727 728 729

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
730
#ifndef __PAGETABLE_PMD_FOLDED
731 732
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
733
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
734 735
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
736
#endif
L
Linus Torvalds 已提交
737 738 739 740 741 742
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
743
static void __cpuinit
744
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
745 746
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
L
Linus Torvalds 已提交
747 748
{
	long swpd = (long)swapper_pg_dir;
749 750 751 752
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
L
Linus Torvalds 已提交
753

754
	uasm_l_vmalloc(l, *p);
L
Linus Torvalds 已提交
755

756
	if (mode != not_refill && check_for_high_segbits) {
757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
779
	if (mode != not_refill && check_for_high_segbits) {
780 781 782 783 784 785 786 787 788 789 790 791 792 793 794
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled so would generate
		 * (address errors?), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);
795 796 797 798 799 800 801 802 803

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
L
Linus Torvalds 已提交
804 805 806
	}
}

807
#else /* !CONFIG_64BIT */
L
Linus Torvalds 已提交
808 809 810 811 812

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
813
static void __cpuinit __maybe_unused
L
Linus Torvalds 已提交
814 815 816 817 818 819
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
820 821 822 823
#ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
824 825 826
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
827 828 829 830
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
         */
831 832 833
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
834
#endif
835
	uasm_i_addu(p, ptr, tmp, ptr);
L
Linus Torvalds 已提交
836
#else
837
	UASM_i_LA_mostly(p, ptr, pgdc);
L
Linus Torvalds 已提交
838
#endif
839 840 841 842 843
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
L
Linus Torvalds 已提交
844 845
}

846
#endif /* !CONFIG_64BIT */
L
Linus Torvalds 已提交
847

848
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
L
Linus Torvalds 已提交
849
{
R
Ralf Baechle 已提交
850
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
L
Linus Torvalds 已提交
851 852
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

853
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
870 871
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
L
Linus Torvalds 已提交
872 873
}

874
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
L
Linus Torvalds 已提交
875 876 877 878 879 880 881 882
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
883
	switch (current_cpu_type()) {
L
Linus Torvalds 已提交
884
	case CPU_NEVADA:
885
		UASM_i_LW(p, ptr, 0, ptr);
L
Linus Torvalds 已提交
886 887 888 889 890
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
891
		UASM_i_LW(p, ptr, 0, ptr);
L
Linus Torvalds 已提交
892 893 894 895
		break;
	}

	build_adjust_context(p, tmp);
896
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
L
Linus Torvalds 已提交
897 898
}

899
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
L
Linus Torvalds 已提交
900 901 902 903 904 905 906 907
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
908 909
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
910 911 912 913 914 915 916
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
917
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
918
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
919
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
920
		}
921
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
922 923 924 925 926
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
927
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
928
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
929
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
930
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
931 932
	}
#else
933 934
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
L
Linus Torvalds 已提交
935 936
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
L
Linus Torvalds 已提交
954
	if (r4k_250MHZhwbug())
955 956
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
L
Linus Torvalds 已提交
957 958 959
#endif
}

960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138
struct mips_huge_tlb_info {
	int huge_pte;
	int restore_scratch;
};

static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
			       struct uasm_reloc **r, unsigned int tmp,
			       unsigned int ptr, int c0_scratch)
{
	struct mips_huge_tlb_info rv;
	unsigned int even, odd;
	int vmalloc_branch_delay_filled = 0;
	const int scratch = 1; /* Our extra working register */

	rv.huge_pte = scratch;
	rv.restore_scratch = 0;

	if (check_for_high_segbits) {
		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		uasm_i_dsrl_safe(p, scratch, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, scratch, label_vmalloc);

		if (pgd_reg == -1) {
			vmalloc_branch_delay_filled = 1;
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0  1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *                         tmp          ptr
	 * fall-through case =   badvaddr  *pgd_current
	 * vmalloc case      =   badvaddr  swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_HUGETLB_PAGE
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * The in the LWX case we don't want to do the load in the
	 * delay slot.  It cannot issue in the same cycle and may be
	 * speculative and unneeded.
	 */
	if (use_lwx_insns())
		uasm_i_nop(p);
#endif /* CONFIG_HUGETLB_PAGE */


	/* build_update_entries */
	if (use_lwx_insns()) {
		even = ptr;
		odd = tmp;
		UASM_i_LWX(p, even, scratch, tmp);
		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
		UASM_i_LWX(p, odd, scratch, tmp);
	} else {
		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
		even = tmp;
		odd = ptr;
		UASM_i_LW(p, even, 0, ptr); /* get even pte */
		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
	}
	if (kernel_uses_smartmips_rixi) {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
		uasm_i_drotr(p, even, even,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_drotr(p, odd, odd,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
	}
	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

	if (c0_scratch >= 0) {
		UASM_i_MFC0(p, scratch, 31, c0_scratch);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
	} else {
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	}

	uasm_i_eret(p); /* return from trap */

	return rv;
}

1139 1140 1141 1142 1143 1144 1145 1146
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

1147
static void __cpuinit build_r4000_tlb_refill_handler(void)
L
Linus Torvalds 已提交
1148 1149
{
	u32 *p = tlb_handler;
1150 1151
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1152 1153
	u32 *f;
	unsigned int final_len;
1154 1155
	struct mips_huge_tlb_info htlb_info __maybe_unused;
	enum vmalloc64_mode vmalloc_mode __maybe_unused;
L
Linus Torvalds 已提交
1156 1157 1158 1159 1160 1161

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

1162 1163
	if (scratch_reg == 0)
		scratch_reg = allocate_kscratch();
1164

1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188
	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
							  scratch_reg);
		vmalloc_mode = refill_scratch;
	} else {
		htlb_info.huge_pte = K0;
		htlb_info.restore_scratch = 0;
		vmalloc_mode = refill_noscratch;
		/*
		 * create the plain linear handler
		 */
		if (bcm1250_m3_war()) {
			unsigned int segbits = 44;

			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
			uasm_i_xor(&p, K0, K0, K1);
			uasm_i_dsrl_safe(&p, K1, K0, 62);
			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
			uasm_i_or(&p, K0, K0, K1);
			uasm_il_bnez(&p, &r, K0, label_leave);
			/* No need for uasm_i_nop */
		}
L
Linus Torvalds 已提交
1189

1190
#ifdef CONFIG_64BIT
1191
		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
L
Linus Torvalds 已提交
1192
#else
1193
		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
L
Linus Torvalds 已提交
1194 1195
#endif

D
David Daney 已提交
1196
#ifdef CONFIG_HUGETLB_PAGE
1197
		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
D
David Daney 已提交
1198 1199
#endif

1200 1201 1202 1203 1204 1205
		build_get_ptep(&p, K0, K1);
		build_update_entries(&p, K0, K1);
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_l_leave(&l, p);
		uasm_i_eret(&p); /* return from trap */
	}
D
David Daney 已提交
1206 1207
#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
1208 1209 1210
	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
				   htlb_info.restore_scratch);
D
David Daney 已提交
1211 1212
#endif

1213
#ifdef CONFIG_64BIT
1214
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
L
Linus Torvalds 已提交
1215 1216 1217 1218 1219 1220
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
M
Matt LaPlante 已提交
1221
	 * need three, with the second nop'ed and the third being
L
Linus Torvalds 已提交
1222 1223
	 * unused.
	 */
1224 1225
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
L
Linus Torvalds 已提交
1226 1227 1228
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
1229 1230 1231 1232
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
L
Linus Torvalds 已提交
1233 1234 1235 1236 1237 1238
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
1239
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
L
Linus Torvalds 已提交
1240 1241
	f = final_handler;
	/* Simplest case, just copy the handler. */
1242
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
L
Linus Torvalds 已提交
1243
	final_len = p - tlb_handler;
1244
#else /* CONFIG_64BIT */
1245 1246
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
L
Linus Torvalds 已提交
1247
		/* Just copy the handler. */
1248
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
L
Linus Torvalds 已提交
1249 1250
		final_len = p - tlb_handler;
	} else {
D
David Daney 已提交
1251 1252
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;
L
Linus Torvalds 已提交
1264 1265

		/*
1266
		 * See if we have overflown one way or the other.
L
Linus Torvalds 已提交
1267
		 */
1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
L
Linus Torvalds 已提交
1288
		/* Copy first part of the handler. */
1289
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
L
Linus Torvalds 已提交
1290 1291
		f += split - tlb_handler;

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304
		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
L
Linus Torvalds 已提交
1305 1306 1307
		}

		/* Copy the rest of the handler. */
1308
		uasm_copy_handler(relocs, labels, split, p, final_handler);
1309 1310
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
L
Linus Torvalds 已提交
1311
	}
1312
#endif /* CONFIG_64BIT */
L
Linus Torvalds 已提交
1313

1314 1315 1316
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);
L
Linus Torvalds 已提交
1317

1318
	memcpy((void *)ebase, final_handler, 0x100);
1319 1320

	dump_handler((u32 *)ebase, 64);
L
Linus Torvalds 已提交
1321 1322 1323 1324 1325 1326 1327 1328
}

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

1329 1330 1331
u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;

static void __cpuinit build_r4000_setup_pgd(void)
{
	const int a0 = 4;
	const int a1 = 5;
	u32 *p = tlbmiss_handler_setup_pgd;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	pgd_reg = allocate_kscratch();

	if (pgd_reg == -1) {
		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 *
		 */
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, 31, pgd_reg);
	}
	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
		panic("tlbmiss_handler_setup_pgd space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler(tlbmiss_handler_setup_pgd,
		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
L
Linus Torvalds 已提交
1381

1382
static void __cpuinit
1383
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
L
Linus Torvalds 已提交
1384 1385 1386 1387
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
1388
		uasm_i_lld(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1389 1390
	else
# endif
1391
		UASM_i_LL(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1392 1393 1394
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
1395
		uasm_i_ld(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1396 1397
	else
# endif
1398
		UASM_i_LW(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1399 1400 1401
#endif
}

1402
static void __cpuinit
1403
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1404
	unsigned int mode)
L
Linus Torvalds 已提交
1405
{
1406 1407 1408 1409
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

1410
	uasm_i_ori(p, pte, pte, mode);
L
Linus Torvalds 已提交
1411 1412 1413
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
1414
		uasm_i_scd(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1415 1416
	else
# endif
1417
		UASM_i_SC(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1418 1419

	if (r10000_llsc_war())
1420
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
L
Linus Torvalds 已提交
1421
	else
1422
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
L
Linus Torvalds 已提交
1423 1424 1425

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
1426 1427 1428 1429 1430 1431 1432
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1433
	} else
1434
		uasm_i_nop(p);
L
Linus Torvalds 已提交
1435
# else
1436
	uasm_i_nop(p);
L
Linus Torvalds 已提交
1437 1438 1439 1440
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
1441
		uasm_i_sd(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1442 1443
	else
# endif
1444
		UASM_i_SW(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1445 1446 1447

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
1448 1449 1450 1451
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
L
Linus Torvalds 已提交
1452 1453 1454 1455 1456 1457 1458 1459 1460 1461
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with it's original value.
 */
1462
static void __cpuinit
1463
build_pte_present(u32 **p, struct uasm_reloc **r,
L
Linus Torvalds 已提交
1464 1465
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
1466
	if (kernel_uses_smartmips_rixi) {
1467 1468 1469 1470 1471 1472 1473 1474
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, pte, lid);
			iPTE_LW(p, pte, ptr);
		}
1475 1476 1477 1478
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, pte, lid);
1479
		iPTE_LW(p, pte, ptr);
1480
	}
L
Linus Torvalds 已提交
1481 1482 1483
}

/* Make PTE valid, store result in PTR. */
1484
static void __cpuinit
1485
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
L
Linus Torvalds 已提交
1486 1487
		 unsigned int ptr)
{
1488 1489 1490
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
L
Linus Torvalds 已提交
1491 1492 1493 1494 1495 1496
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
1497
static void __cpuinit
1498
build_pte_writable(u32 **p, struct uasm_reloc **r,
L
Linus Torvalds 已提交
1499 1500
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
		uasm_i_nop(p);
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
		uasm_il_bnez(p, r, pte, lid);
		iPTE_LW(p, pte, ptr);
	}
L
Linus Torvalds 已提交
1512 1513 1514 1515 1516
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
1517
static void __cpuinit
1518
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
L
Linus Torvalds 已提交
1519 1520
		 unsigned int ptr)
{
1521 1522 1523 1524
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
L
Linus Torvalds 已提交
1525 1526 1527 1528 1529 1530
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
1531
static void __cpuinit
1532
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
L
Linus Torvalds 已提交
1533 1534
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
1535 1536 1537 1538 1539 1540 1541 1542
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, pte, lid);
		iPTE_LW(p, pte, ptr);
	}
L
Linus Torvalds 已提交
1543 1544
}

1545
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1546 1547


L
Linus Torvalds 已提交
1548 1549 1550 1551
/*
 * R3000 style TLB load/store/modify handlers.
 */

1552 1553 1554 1555
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
1556
static void __cpuinit
1557
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
L
Linus Torvalds 已提交
1558
{
1559 1560 1561 1562 1563
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
L
Linus Torvalds 已提交
1564 1565 1566
}

/*
1567 1568 1569 1570
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
L
Linus Torvalds 已提交
1571
 */
1572
static void __cpuinit
1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
L
Linus Torvalds 已提交
1588 1589
}

1590
static void __cpuinit
L
Linus Torvalds 已提交
1591 1592 1593 1594 1595
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607
	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
L
Linus Torvalds 已提交
1608 1609
}

1610
static void __cpuinit build_r3000_tlb_load_handler(void)
L
Linus Torvalds 已提交
1611 1612
{
	u32 *p = handle_tlbl;
1613 1614
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1615 1616 1617 1618 1619 1620

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
1621
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
1622
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1623
	build_make_valid(&p, &r, K0, K1);
1624
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
L
Linus Torvalds 已提交
1625

1626 1627 1628
	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1629 1630 1631 1632

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

1633 1634 1635
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));
L
Linus Torvalds 已提交
1636

1637
	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
L
Linus Torvalds 已提交
1638 1639
}

1640
static void __cpuinit build_r3000_tlb_store_handler(void)
L
Linus Torvalds 已提交
1641 1642
{
	u32 *p = handle_tlbs;
1643 1644
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1645 1646 1647 1648 1649 1650

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
1651
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
1652
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1653
	build_make_write(&p, &r, K0, K1);
1654
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
L
Linus Torvalds 已提交
1655

1656 1657 1658
	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1659 1660 1661 1662

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

1663 1664 1665
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));
L
Linus Torvalds 已提交
1666

1667
	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
L
Linus Torvalds 已提交
1668 1669
}

1670
static void __cpuinit build_r3000_tlb_modify_handler(void)
L
Linus Torvalds 已提交
1671 1672
{
	u32 *p = handle_tlbm;
1673 1674
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
Linus Torvalds 已提交
1675 1676 1677 1678 1679 1680

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
1681
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
1682
	uasm_i_nop(&p); /* load delay */
L
Linus Torvalds 已提交
1683
	build_make_write(&p, &r, K0, K1);
1684
	build_r3000_pte_reload_tlbwi(&p, K0, K1);
L
Linus Torvalds 已提交
1685

1686 1687 1688
	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
L
Linus Torvalds 已提交
1689 1690 1691 1692

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

1693 1694 1695
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));
L
Linus Torvalds 已提交
1696

1697
	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
L
Linus Torvalds 已提交
1698
}
1699
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

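	/*
	 * Index the PTE table: load the PTE table base from the
	 * pgd/pmd entry in ptr, convert the faulting address into a
	 * byte offset into that table and add the two.
	 */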
	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
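	/*
	 * ori followed by xori with sizeof(pte_t) clears that bit, so
	 * ptr ends up pointing at the even PTE of the pair no matter
	 * which half of it faulted.
	 */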
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
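	/*
	 * Out-of-line handling of vmalloc-space addresses, reached by
	 * a branch from the 64-bit head code above.
	 */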
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
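		/*
		 * BCM1250 M3 workaround: compare the region bits and
		 * VPN2 of BadVAddr and EntryHi and, if they disagree,
		 * treat the exception as spurious and branch straight
		 * to the eret at label_leave.
		 */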
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
			uasm_i_beqz(&p, K0, 8);
		}

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
			/* Reload the PTE value */
			iPTE_LW(&p, K0, K1);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, K0, K0, 2);
			uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
			uasm_l_tlbl_goaround1(&l, p);
			/* Reload the PTE value */
			iPTE_LW(&p, K0, K1);
		}
	}
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
			uasm_i_beqz(&p, K0, 8);
		}
		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, K0, K0, 2);
			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		}
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);

		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
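	/* Present and writable: set the accessed and dirty bits in the huge PTE. */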
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
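	/*
	 * Note whether the CPU can generate virtual addresses above
	 * the range covered by the page tables; if so the refill
	 * handler must check for them explicitly.
	 */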
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}

void __cpuinit flush_tlb_handlers(void)
{
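	/*
	 * The handlers are assembled into ordinary arrays with normal
	 * stores; write back the D-cache and invalidate the I-cache
	 * over each of them before they are executed.
	 */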
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
#endif
}