/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011  MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

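/*
 * The Octeon cores implement the cnMIPS bbit0/bbit1 instructions,
 * which branch directly on a single register bit and so save the
 * usual andi/branch pair in the synthesized handlers.
 */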
static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
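	 *
	 * E.g. with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2, kernel slot 0
	 * computes to 2 * 128 - 8 - 32768 = -32520, the top eight bytes
	 * of the CVMSEG region.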
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;

static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);
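	/* ffs() is 1-based; 0 here means no KScratch register is free. */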

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}

static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

static struct work_registers __cpuinit build_get_work_registers(u32 **p)
{
	struct work_registers r;

	int smp_processor_id_reg;
	int smp_processor_id_sel;
	int smp_processor_id_shift;

	if (scratch_reg > 0) {
		/* Save in CPU local C0_KScratch? */
		UASM_i_MTC0(p, 1, 31, scratch_reg);
		r.r1 = K0;
		r.r2 = K1;
		r.r3 = 1;
		return r;
	}

	if (num_possible_cpus() > 1) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
		smp_processor_id_shift = 51;
		smp_processor_id_reg = 20; /* XContext */
		smp_processor_id_sel = 0;
#else
# ifdef CONFIG_32BIT
		smp_processor_id_shift = 25;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
# ifdef CONFIG_64BIT
		smp_processor_id_shift = 26;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
#endif
		/* Get smp_processor_id */
		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);

		/* handler_reg_save index in K0 */
		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));

		UASM_i_LA(p, K1, (long)&handler_reg_save);
		UASM_i_ADDU(p, K0, K0, K1);
	} else {
		UASM_i_LA(p, K0, (long)&handler_reg_save);
	}
	/* K0 now points to save area, save $1 and $2  */
	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);

	r.r1 = K1;
	r.r2 = 1;
	r.r3 = 2;
	return r;
}

static void __cpuinit build_restore_work_registers(u32 **p)
{
	if (scratch_reg > 0) {
		UASM_i_MFC0(p, 1, 31, scratch_reg);
		return;
	}
	/* K0 already points to save area, restore $1 and $2  */
	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * so we cannot do the R3000 handlers under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd
 */
extern unsigned long pgd_current[];

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
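	/*
	 * The srl/sll pair above turns BadVAddr into a byte offset
	 * into the page directory: each pgd slot maps 4 MB (hence the
	 * shift by 22) and entries are 4 bytes wide (hence the shift
	 * by 2).
	 */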
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with space for 32 instructions each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					 struct uasm_reloc **r,
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
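		/*
		 * The ehb (execution hazard barrier) above clears any
		 * lingering mtc0 hazard on R2 cores, so the TLB write
		 * needs no hand-placed nops.
		 */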
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
	case CPU_XLR:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_M14KC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
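		/* fall through */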
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}

#ifdef CONFIG_HUGETLB_PAGE

static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid,
					     int restore_scratch)
{
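	/*
	 * In each arm below, the instruction emitted after uasm_il_b()
	 * lands in the branch delay slot: the scratch restore in the
	 * first arm, the PageMask write in the second.
	 */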
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}

static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode,
						 int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
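	/*
	 * E.g. with 2 MB huge pages on 4 KB base pages, HPAGE_SIZE >> 7
	 * is 0x4000, i.e. (HPAGE_SIZE / 2) >> 12 << 6: the EntryLo
	 * distance between the two half mappings, which fits the 16-bit
	 * ADDIU immediate used below.
	 */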

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0  1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
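		/*
		 * The single drotr both moves the pgd pointer down by 11
		 * bits to its natural position and rotates the three bits
		 * set by the ori (10, 8 and 6) up to bits 63, 61 and 59:
		 * the "1 0 1 0 1" xkphys cached prefix named above.
		 */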
	}
#elif defined(CONFIG_SMP)
# ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assumed disabled, so it would generate
		 * address errors), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
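	/*
	 * shift and mask turn the BadVPN2 field of c0_context (starting
	 * at bit 4) into the byte offset of an even/odd PTE pair; e.g.
	 * shift == 1 and mask == 0xff8 on a 32-bit kernel with 4 KB
	 * pages.
	 */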

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

struct mips_huge_tlb_info {
	int huge_pte;
	int restore_scratch;
};

static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
			       struct uasm_reloc **r, unsigned int tmp,
			       unsigned int ptr, int c0_scratch)
{
	struct mips_huge_tlb_info rv;
	unsigned int even, odd;
	int vmalloc_branch_delay_filled = 0;
	const int scratch = 1; /* Our extra working register */

	rv.huge_pte = scratch;
	rv.restore_scratch = 0;

	if (check_for_high_segbits) {
		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		uasm_i_dsrl_safe(p, scratch, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, scratch, label_vmalloc);

		if (pgd_reg == -1) {
			vmalloc_branch_delay_filled = 1;
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0  1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *                         tmp          ptr
	 * fall-through case =   badvaddr  *pgd_current
	 * vmalloc case      =   badvaddr  swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_HUGETLB_PAGE
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * In the LWX case we don't want to do the load in the
	 * delay slot.  It cannot issue in the same cycle and may be
	 * speculative and unneeded.
	 */
	if (use_lwx_insns())
		uasm_i_nop(p);
#endif /* CONFIG_HUGETLB_PAGE */


	/* build_update_entries */
	if (use_lwx_insns()) {
		even = ptr;
		odd = tmp;
		UASM_i_LWX(p, even, scratch, tmp);
		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
		UASM_i_LWX(p, odd, scratch, tmp);
	} else {
		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
		even = tmp;
		odd = ptr;
		UASM_i_LW(p, even, 0, ptr); /* get even pte */
		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
	}
	if (kernel_uses_smartmips_rixi) {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
		uasm_i_drotr(p, even, even,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_drotr(p, odd, odd,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
	}
	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

	if (c0_scratch >= 0) {
		UASM_i_MFC0(p, scratch, 31, c0_scratch);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
	} else {
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	}

	uasm_i_eret(p); /* return from trap */

	return rv;
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	struct mips_huge_tlb_info htlb_info __maybe_unused;
	enum vmalloc64_mode vmalloc_mode __maybe_unused;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
							  scratch_reg);
		vmalloc_mode = refill_scratch;
	} else {
		htlb_info.huge_pte = K0;
		htlb_info.restore_scratch = 0;
		vmalloc_mode = refill_noscratch;
		/*
		 * create the plain linear handler
		 */
		if (bcm1250_m3_war()) {
			unsigned int segbits = 44;

			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
			uasm_i_xor(&p, K0, K0, K1);
			uasm_i_dsrl_safe(&p, K1, K0, 62);
			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
			uasm_i_or(&p, K0, K0, K1);
			uasm_il_bnez(&p, &r, K0, label_leave);
			/* No need for uasm_i_nop */
		}

#ifdef CONFIG_64BIT
		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

		build_get_ptep(&p, K0, K1);
		build_update_entries(&p, K0, K1);
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_l_leave(&l, p);
		uasm_i_eret(&p); /* return from trap */
	}
#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
				   htlb_info.restore_scratch);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;

static void __cpuinit build_r4000_setup_pgd(void)
{
	const int a0 = 4;
	const int a1 = 5;
	u32 *p = tlbmiss_handler_setup_pgd;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	pgd_reg = allocate_kscratch();

	if (pgd_reg == -1) {
		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 *
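		 * E.g. for the CKSEG0 base 0xffffffff80000000 the
		 * arithmetic shift right by 29 yields -4 and the add
		 * gives 0, while any non-CKSEG0 address stays nonzero.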
		 */
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, 31, pgd_reg);
	}
	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
		panic("tlbmiss_handler_setup_pgd space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler(tlbmiss_handler_setup_pgd,
		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif

static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
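	/*
	 * On SMP the PTE was loaded with LL (see iPTE_LW) and is written
	 * back with SC below; on failure we branch to
	 * label_smp_pgtable_change to reload and retry, so concurrent
	 * updates are never lost.
	 */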
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	if (kernel_uses_smartmips_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-( */
				iPTE_LW(p, pte, ptr);
		}
	} else {
		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-( */
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;
		uasm_i_andi(p, t, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT


/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
L

1680
static void __cpuinit
L
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
L

1700
static void __cpuinit build_r3000_tlb_load_handler(void)
L
	u32 *p = handle_tlbl;
1703 1704
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
L
	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));
L
Linus Torvalds 已提交
1726

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */
static struct work_registers __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
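	/*
	 * wr names the registers the generated code is allowed to
	 * clobber: k0/k1 when they are free, otherwise registers
	 * spilled to a save area by build_get_work_registers.
	 */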
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the _PAGE_HUGE bit to
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif

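	/*
	 * Index the pte page: shift BadVAddr so that the pte index
	 * arrives pre-scaled by sizeof(pte_t), mask it, and add it to
	 * the pmd value loaded below.
	 */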
	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
	UASM_i_LW(p, wr.r2, 0, wr.r2);
	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
	return wr;
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
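	/*
	 * ptr may point at either pte of the even/odd pair; ori then
	 * xori with sizeof(pte_t) clears that bit, rounding ptr down
	 * to the even pte without needing an extra register.
	 */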
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	build_restore_work_registers(p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
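	/*
	 * Emitted past the eret: the out-of-line vmalloc-space pgd
	 * lookup that build_get_pmde64 branches to for mapped kernel
	 * addresses.
	 */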
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
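		/*
		 * BCM1250 M3 erratum workaround: if the VPN of
		 * c0_badvaddr disagrees with c0_entryhi (ignoring the
		 * low 13 bits and the segment bits), the exception was
		 * spurious and we simply leave.
		 */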
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			/* even pte: branch past the ENTRYLO1 load below */
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbl(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbs(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbm(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once;

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
			scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}

void __cpuinit flush_tlb_handlers(void)
{
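	/*
	 * The handlers were synthesized through the d-cache; flush the
	 * ranges so instruction fetch sees the new code.
	 */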
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd));
#endif
}