/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications by Dan Malek
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains low-level support and setup for PowerPC 8xx
 *  embedded processors, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fixmap.h>
#include <asm/export.h>

/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
#define SPRN_MI_TWC_ADDR	0x2b80
#define SPRN_MI_RPN_ADDR	0x2d80
#define SPRN_MD_TWC_ADDR	0x3b80
#define SPRN_MD_RPN_ADDR	0x3d80

#define MTSPR_CPU6(spr, reg, treg)	\
	li	treg, spr##_ADDR;	\
	stw	treg, 12(r0);		\
	lwz	treg, 12(r0);		\
	mtspr	spr, reg
#else
#define MTSPR_CPU6(spr, reg, treg)	\
	mtspr	spr, reg
#endif
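
/* With CONFIG_8xx_CPU6 the macro above does not issue a bare mtspr: it
 * first stores the SPR's special address pattern (e.g. 0x2b80 for
 * MI_TWC) to a scratch word and reads it back, which is presumably the
 * required workaround for the CPU6 erratum, and it clobbers treg in the
 * process.  Without CONFIG_8xx_CPU6 it collapses to a plain mtspr.
 */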

/* Macro to test if an address is a kernel address */
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
#define IS_KERNEL(tmp, addr)		\
	andis.	tmp, addr, 0x8000	/* Address >= 0x80000000 */
#define BRANCH_UNLESS_KERNEL(label)	beq	label
#else
#define IS_KERNEL(tmp, addr)		\
	rlwinm	tmp, addr, 16, 16, 31;	\
	cmpli	cr0, tmp, PAGE_OFFSET >> 16
#define BRANCH_UNLESS_KERNEL(label)	blt	label
#endif
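
/* Two variants: when the user/kernel boundary is at 0x80000000, a single
 * andis. against 0x8000 classifies the address and BRANCH_UNLESS_KERNEL
 * branches on "equal" (result zero, i.e. a user address).  Otherwise the
 * upper half of the address is compared with PAGE_OFFSET >> 16 and user
 * addresses branch on "less than".
 */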


/*
 * Value for the bits that have fixed value in RPN entries.
 * Also used for tagging DAR for DTLBerror.
 */
#ifdef CONFIG_PPC_16K_PAGES
#define RPN_PATTERN	(0x00f0 | MD_SPS16K)
#else
#define RPN_PATTERN	0x00f0
#endif

#define PAGE_SHIFT_512K		19
#define PAGE_SHIFT_8M		23

	__HEAD
_ENTRY(_stext);
_ENTRY(_start);

/* MPC8xx
 * This port was done on an MBX board with an 860.  Right now I only
 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 * code there loads up some registers before calling us:
 *   r3: ptr to board info data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * I decided to use conditional compilation instead of checking PVR and
 * adding more processor specific branches around code I don't need.
 * Since this is an embedded processor, I also appreciate any memory
 * savings I can get.
 *
 * The MPC8xx does not have any BATs, but it supports large page sizes.
 * We first initialize the MMU to support 8M byte pages, then load one
 * entry into each of the instruction and data TLBs to map the first
 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 * the "internal" processor registers before MMU_init is called.
 *
 *	-- Dan
 */
	.globl	__start
__start:
	mr	r31,r3			/* save device tree ptr */

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 * ready to work.
 */

turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */
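
/* rfi reloads MSR from SRR1 and jumps to the address in SRR0, so
 * instruction and data translation (MSR_IR/MSR_DR) are switched on in
 * the same instant that execution lands in start_here below.
 */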

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	EXCEPTION_PROLOG_0;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_0	\
	mtspr	SPRN_SPRG_SCRATCH0,r10;	\
	mtspr	SPRN_SPRG_SCRATCH1,r11

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG_THREAD;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG_SCRATCH0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG_SCRATCH1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Exception exit code.
 */
#define EXCEPTION_EPILOG_0	\
	mfspr	r10,SPRN_SPRG_SCRATCH0;	\
	mfspr	r11,SPRN_SPRG_SCRATCH1

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
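
/* The EXC_XFER_* flavours above differ in two ways: the COPY_EE variants
 * carry MSR_EE over from the interrupted context into the MSR used for
 * the C handler (NOCOPY leaves external interrupts disabled), and the
 * *_LITE variants record the trap number as n+1 to flag the lighter
 * transfer_to_handler/ret_from_except path.  transfer_to_handler (in
 * entry_32.S) fetches the handler and return addresses from the two
 * .long words that the template places right after the bl.
 */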

/* System reset */
	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG
	mfspr r4,SPRN_DAR
	stw r4,_DAR(r11)
	li r5,RPN_PATTERN
	mtspr SPRN_DAR,r5	/* Tag DAR, to be used in DTLB Error */
	mfspr r5,SPRN_DSISR
	stw r5,_DSISR(r11)
	addi r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)

/* Data access exception.
 * This is "never generated" by the MPC8xx.
 */
	. = 0x300
DataAccess:

/* Instruction access exception.
 * This is "never generated" by the MPC8xx.
 */
	. = 0x400
InstructionAccess:

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	li	r5,RPN_PATTERN
	mtspr	SPRN_DAR,r5	/* Tag DAR, to be used in DTLB Error */
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* No FPU on MPC8xx.  This exception is not supposed to happen.
*/
	EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  The task switch loads the M_TW register with the pointer to the first
 * level table.
 * If we discover there is no second level table (value is zero) or if there
 * is an invalid pte, we load that into the TLB, which causes another fault
 * into the TLB Error interrupt where we can handle such problems.
 * We have to use the MD_xxx registers for the tablewalk because the
 * equivalent MI_xxx registers only perform the attribute functions.
 */

#ifdef CONFIG_8xx_CPU15
#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr)	\
	addi	tmp, addr, PAGE_SIZE;	\
	tlbie	tmp;			\
	addi	tmp, addr, -PAGE_SIZE;	\
	tlbie	tmp
#else
#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr)
#endif
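
/* With CONFIG_8xx_CPU15 the macro above tlbie's the pages immediately
 * before and after the faulting address, presumably the documented
 * workaround for the CPU15 erratum (problems when instruction fetch
 * runs into an adjacent page); otherwise it expands to nothing.
 */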

InstructionTLBMiss:
#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mtspr	SPRN_SPRG_SCRATCH2, r3
#endif
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
	lwz	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
	addi	r11, r11, 1
	stw	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
#endif
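	/* The counter is addressed via "- PAGE_OFFSET", i.e. by its physical
	 * address, because the TLB miss handlers run with translation off.
	 */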

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
	/* Only modules will cause ITLB Misses as we always
	 * pin the first 8MB of kernel memory */
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mfcr	r3
#endif
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
	IS_KERNEL(r11, r10)
#endif
	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
	BRANCH_UNLESS_KERNEL(3f)
	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
#endif
	/* Insert level 1 index */
	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */

	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
#ifdef CONFIG_HUGETLB_PAGE
	mtcr	r11
	bt-	28, 10f		/* bit 28 = Large page (8M) */
	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
#endif
	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
	lwz	r10, 0(r10)	/* Get the pte */
4:
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mtcr	r3
#endif
	/* Insert the APG into the TWC from the Linux PTE. */
	rlwimi	r11, r10, 0, 25, 26
	/* Load the MI_TWC with the attributes for this "segment." */
	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)	/* Set segment attributes */

#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
	rlwimi	r10, r11, 1, MI_SPS16K
#endif
#ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
#endif
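	/* With CONFIG_SWAP the PRESENT bit that reaches the TLB is
	 * _PAGE_PRESENT ANDed with _PAGE_ACCESSED (rotated down by 5 bits),
	 * so a page whose accessed bit is still clear faults into the TLB
	 * Error handler instead, where that bit can be updated; see the
	 * fuller explanation in the DataStoreTLBMiss path below.
	 */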
	li	r11, RPN_PATTERN
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 20-23 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
	rlwimi	r10, r11, 0, 0x0ff0	/* Set 24-27, clear 20-23 */
#else
	rlwimi	r10, r11, 0, 0x0ff8	/* Set 24-27, clear 20-23,28 */
#endif
	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */

	/* Restore registers */
#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
	mfspr	r3, SPRN_SPRG_SCRATCH2
#endif
	EXCEPTION_EPILOG_0
	rfi

#ifdef CONFIG_HUGETLB_PAGE
10:	/* 8M pages */
#ifdef CONFIG_PPC_16K_PAGES
	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
	/* Add level 2 base */
	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
#else
	/* Level 2 base */
	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif
	lwz	r10, 0(r10)	/* Get the pte */
	rlwinm	r11, r11, 0, 0xf
	b	4b

20:	/* 512k pages */
	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
	/* Add level 2 base */
	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
	lwz	r10, 0(r10)	/* Get the pte */
	rlwinm	r11, r11, 0, 0xf
	b	4b
#endif

	. = 0x1200
DataStoreTLBMiss:
	mtspr	SPRN_SPRG_SCRATCH2, r3
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
	lwz	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
	addi	r11, r11, 1
	stw	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
#endif
	mfcr	r3

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_MD_EPN
	rlwinm	r10, r10, 16, 0xfff8
	cmpli	cr0, r10, PAGE_OFFSET@h
	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
	blt+	3f
#ifndef CONFIG_PIN_TLB_IMMR
	cmpli	cr0, r10, VIRT_IMMR_BASE@h
#endif
_ENTRY(DTLBMiss_cmp)
	cmpli	cr7, r10, (PAGE_OFFSET + 0x1800000)@h
	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
#ifndef CONFIG_PIN_TLB_IMMR
_ENTRY(DTLBMiss_jmp)
	beq-	DTLBMissIMMR
#endif
	blt	cr7, DTLBMissLinear
3:
	mfspr	r10, SPRN_MD_EPN

	/* Insert level 1 index */
	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */

	/* We have a pte table, so fetch the pte from the table.
	 */
	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
#ifdef CONFIG_HUGETLB_PAGE
	mtcr	r11
	bt-	28, 10f		/* bit 28 = Large page (8M) */
	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
#endif
	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
	lwz	r10, 0(r10)	/* Get the pte */
4:
	mtcr	r3

	/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
	 * They are bits 26-27 of both the Linux PTE and the TWC (at least
	 * I got that right :-).  It will be better when we can put
	 * this into the Linux pgd/pmd and load it in the operation
	 * above.
	 */
	rlwimi	r11, r10, 0, 26, 27
	/* Insert the WriteThru flag into the TWC from the Linux PTE.
	 * It is bit 25 in the Linux PTE and bit 30 in the TWC
	 */
	rlwimi	r11, r10, 32-5, 30, 30
	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)

	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
	 * In 16k pages mode, SPS is always 1 */
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
	rlwimi	r10, r11, 1, MD_SPS16K
#endif
	/* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
	 * We also need to know if the insn is a load/store, so:
	 * Clear _PAGE_PRESENT and load that which will
	 * trap into DTLB Error with store bit set accordingly.
	 */
	/* PRESENT=0x1, ACCESSED=0x20
	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
	 * r10 = (r10 & ~PRESENT) | r11;
	 */
#ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
#endif
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, RPN_PATTERN
#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
#else
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
#endif
	rlwimi	r10, r11, 0, 20, 20	/* clear 20 */
	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */

	/* Restore registers */
	mfspr	r3, SPRN_SPRG_SCRATCH2
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	EXCEPTION_EPILOG_0
	rfi

#ifdef CONFIG_HUGETLB_PAGE
10:	/* 8M pages */
	/* Extract level 2 index */
#ifdef CONFIG_PPC_16K_PAGES
	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
	/* Add level 2 base */
	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
#else
	/* Level 2 base */
	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif
	lwz	r10, 0(r10)	/* Get the pte */
	rlwinm	r11, r11, 0, 0xf
	b	4b

20:	/* 512k pages */
	/* Extract level 2 index */
	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
	/* Add level 2 base */
	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
	lwz	r10, 0(r10)	/* Get the pte */
	rlwinm	r11, r11, 0, 0xf
	b	4b
#endif

/* This is an instruction TLB error on the MPC8xx.  This could be due
 * to many reasons, such as executing guarded memory or illegal instruction
 * addresses.  There is nothing to do but handle a big time error fault.
 */
	. = 0x1300
InstructionTLBError:
	EXCEPTION_PROLOG
	mr	r4,r12
	andis.	r5,r9,0x4820		/* Filter relevant SRR1 bits */
	andis.	r10,r9,0x4000
	beq+	1f
	tlbie	r4
itlbie:
	/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
1:	EXC_XFER_LITE(0x400, handle_page_fault)

/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We bail out to
 * a higher level function that can handle it.
 */
	. = 0x1400
DataTLBError:
	EXCEPTION_PROLOG_0
	mfcr	r10

	mfspr	r11, SPRN_DAR
	cmpwi	cr0, r11, RPN_PATTERN
	beq-	FixupDAR	/* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	mfspr	r4,SPRN_DAR
	andis.	r10,r5,0x4000
	beq+	1f
	tlbie	r4
dtlbie:
1:	li	r10,RPN_PATTERN
	mtspr	SPRN_DAR,r10	/* Tag DAR, to be used in DTLB Error */
	/* 0x300 is DataAccess exception, needed by bad_page_fault() */
	EXC_XFER_LITE(0x300, handle_page_fault)

	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
	. = 0x1c00
DataBreakpoint:
	EXCEPTION_PROLOG_0
	mfcr	r10
	mfspr	r11, SPRN_SRR0
	cmplwi	cr0, r11, (dtlbie - PAGE_OFFSET)@l
	cmplwi	cr7, r11, (itlbie - PAGE_OFFSET)@l
	beq-	cr0, 11f
	beq-	cr7, 11f
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mfspr	r4,SPRN_BAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	EXC_XFER_EE(0x1c00, do_break)
11:
	mtcr	r10
	EXCEPTION_EPILOG_0
	rfi

#ifdef CONFIG_PPC_8xx_PERF_EVENT
	. = 0x1d00
InstructionBreakpoint:
	EXCEPTION_PROLOG_0
	lis	r10, (instruction_counter - PAGE_OFFSET)@ha
	lwz	r11, (instruction_counter - PAGE_OFFSET)@l(r10)
	addi	r11, r11, -1
	stw	r11, (instruction_counter - PAGE_OFFSET)@l(r10)
	lis	r10, 0xffff
	ori	r10, r10, 0x01
	mtspr	SPRN_COUNTA, r10
	EXCEPTION_EPILOG_0
	rfi
#else
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
#endif
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)

	. = 0x2000

/*
 * Bottom part of the DataStoreTLBMiss handlers, for the IMMR area and for
 * linear RAM; there is not enough space for them in the DataStoreTLBMiss area.
 */
DTLBMissIMMR:
	mtcr	r3
	/* Set 512k byte guarded page and mark it valid */
	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
			  _PAGE_PRESENT | _PAGE_NO_CACHE
	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */

	li	r11, RPN_PATTERN
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r3, SPRN_SPRG_SCRATCH2
	EXCEPTION_EPILOG_0
	rfi

DTLBMissLinear:
	mtcr	r3
	/* Set 8M byte page and mark it valid */
	li	r11, MD_PS8MEG | MD_SVALID
	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
	rlwinm	r10, r10, 16, 0x0f800000	/* 8xx supports max 256Mb RAM */
	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
			  _PAGE_PRESENT
	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */

	li	r11, RPN_PATTERN
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r3, SPRN_SPRG_SCRATCH2
	EXCEPTION_EPILOG_0
	rfi

/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
 * by decoding the registers used by the dcbx instruction and adding them.
 * DAR is set to the calculated address.
 */
 /* define if you don't want to use self modifying code */
#define NO_SELF_MODIFYING_CODE
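/* Two variants are provided below.  With NO_SELF_MODIFYING_CODE (the
 * default here) the RA/RB fields of the faulting instruction index a
 * jump table of "add" stubs that sums the two registers.  The
 * alternative patches an "add r10,RA,RB" into modified_instr at run
 * time and executes it.  Either way the computed effective address ends
 * up in DAR and control returns to DARFixed.
 */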
FixupDAR:/* Entry point for dcbx workaround. */
	mtspr	SPRN_SPRG_SCRATCH2, r10
	/* fetch instruction from memory. */
	mfspr	r10, SPRN_SRR0
	IS_KERNEL(r11, r10)
	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
	BRANCH_UNLESS_KERNEL(3f)
	rlwinm	r11, r10, 16, 0xfff8
_ENTRY(FixupDAR_cmp)
	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
	/* create physical page address from effective address */
	tophys(r11, r10)
	blt-	cr7, 201f
	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
	/* Insert level 1 index */
3:	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
	mtcr	r11
	bt	28,200f		/* bit 28 = Large page (8M) */
	bt	29,202f		/* bit 29 = Large page (8M or 512K) */
	rlwinm	r11, r11,0,0,19	/* Extract page descriptor page address */
	/* Insert level 2 index */
	rlwimi	r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
	lwz	r11, 0(r11)	/* Get the pte */
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT, 31
201:	lwz	r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst do not generate DTLB Misses/Errors,
 * no need to include them here */
	xoris	r10, r11, 0x7c00	/* check if major OP code is 31 */
	rlwinm	r10, r10, 0, 21, 5
	cmpwi	cr0, r10, 2028	/* Is dcbz? */
	beq+	142f
	cmpwi	cr0, r10, 940	/* Is dcbi? */
	beq+	142f
	cmpwi	cr0, r10, 108	/* Is dcbst? */
	beq+	144f		/* Fix up store bit! */
	cmpwi	cr0, r10, 172	/* Is dcbf? */
	beq+	142f
	cmpwi	cr0, r10, 1964	/* Is icbi? */
	beq+	142f
141:	mfspr	r10,SPRN_SPRG_SCRATCH2
	b	DARFixed	/* Nope, go back to normal TLB processing */

	/* concat physical page address(r11) and page offset(r10) */
200:
#ifdef CONFIG_PPC_16K_PAGES
	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
	rlwimi	r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
#else
	rlwinm	r11, r10, 0, ~HUGEPD_SHIFT_MASK
#endif
	lwz	r11, 0(r11)	/* Get the pte */
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
	b	201b

202:
	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
	rlwimi	r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
	lwz	r11, 0(r11)	/* Get the pte */
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
	b	201b

144:	mfspr	r10, SPRN_DSISR
	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
	mtspr	SPRN_DSISR, r10
142:	/* continue, it was a dcbx, dcbi instruction. */
#ifndef NO_SELF_MODIFYING_CODE
	andis.	r10,r11,0x1f	/* test if reg RA is r0 */
	li	r10,modified_instr@l
	dcbtst	r0,r10		/* touch for store */
	rlwinm	r11,r11,0,0,20	/* Zero lower 10 bits */
	oris	r11,r11,640	/* Transform instr. to a "add r10,RA,RB" */
	ori	r11,r11,532
	stw	r11,0(r10)	/* store add/and instruction */
	dcbf	0,r10		/* flush new instr. to memory. */
	icbi	0,r10		/* invalidate instr. cache line */
	mfspr	r11, SPRN_SPRG_SCRATCH1	/* restore r11 */
	mfspr	r10, SPRN_SPRG_SCRATCH0	/* restore r10 */
	isync			/* Wait until new instr is loaded from memory */
modified_instr:
	.space	4		/* this is where the add instr. is stored */
	bne+	143f
	subf	r10,r0,r10	/* r10=r10-r0, only if reg RA is r0 */
143:	mtdar	r10		/* store faulting EA in DAR */
	mfspr	r10,SPRN_SPRG_SCRATCH2
	b	DARFixed	/* Go back to normal TLB handling */
#else
	mfctr	r10
	mtdar	r10			/* save ctr reg in DAR */
	rlwinm	r10, r11, 24, 24, 28	/* offset into jump table for reg RB */
	addi	r10, r10, 150f@l	/* add start of table */
	mtctr	r10			/* load ctr with jump address */
	xor	r10, r10, r10		/* sum starts at zero */
	bctr				/* jump into table */
150:
	add	r10, r10, r0	;b	151f
	add	r10, r10, r1	;b	151f
	add	r10, r10, r2	;b	151f
	add	r10, r10, r3	;b	151f
	add	r10, r10, r4	;b	151f
	add	r10, r10, r5	;b	151f
	add	r10, r10, r6	;b	151f
	add	r10, r10, r7	;b	151f
	add	r10, r10, r8	;b	151f
	add	r10, r10, r9	;b	151f
	mtctr	r11	;b	154f	/* r10 needs special handling */
	mtctr	r11	;b	153f	/* r11 needs special handling */
	add	r10, r10, r12	;b	151f
	add	r10, r10, r13	;b	151f
	add	r10, r10, r14	;b	151f
	add	r10, r10, r15	;b	151f
	add	r10, r10, r16	;b	151f
	add	r10, r10, r17	;b	151f
	add	r10, r10, r18	;b	151f
	add	r10, r10, r19	;b	151f
	add	r10, r10, r20	;b	151f
	add	r10, r10, r21	;b	151f
	add	r10, r10, r22	;b	151f
	add	r10, r10, r23	;b	151f
	add	r10, r10, r24	;b	151f
	add	r10, r10, r25	;b	151f
	add	r10, r10, r26	;b	151f
	add	r10, r10, r27	;b	151f
	add	r10, r10, r28	;b	151f
	add	r10, r10, r29	;b	151f
	add	r10, r10, r30	;b	151f
	add	r10, r10, r31
151:
	rlwinm. r11,r11,19,24,28	/* offset into jump table for reg RA */
	beq	152f			/* if reg RA is zero, don't add it */
	addi	r11, r11, 150b@l	/* add start of table */
	mtctr	r11			/* load ctr with jump address */
	rlwinm	r11,r11,0,16,10		/* make sure we don't execute this more than once */
	bctr				/* jump into table */
152:
	mfdar	r11
	mtctr	r11			/* restore ctr reg from DAR */
	mtdar	r10			/* save fault EA to DAR */
	mfspr	r10,SPRN_SPRG_SCRATCH2
	b	DARFixed		/* Go back to normal TLB handling */

	/* special handling for r10,r11 since these are modified already */
153:	mfspr	r11, SPRN_SPRG_SCRATCH1	/* load r11 from SPRN_SPRG_SCRATCH1 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b
154:	mfspr	r11, SPRN_SPRG_SCRATCH0	/* load r10 from SPRN_SPRG_SCRATCH0 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b
#endif

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@ha
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3f80
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TW, r6
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 512k IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
initial_mmu:
	li	r8, 0
	mtspr	SPRN_MI_CTR, r8		/* remove PINNED ITLB entries */
	lis	r10, MD_RESETVAL@h
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h
#endif
	mtspr	SPRN_MD_CTR, r10	/* remove PINNED DTLB entries */

	tlbia			/* Invalidate all TLB entries */
/* Always pin the first 8 MB ITLB to prevent ITLB
   misses while mucking around with SRR0/SRR1 in asm
*/
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00

	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	oris	r10, r10, MD_RSV4I@h
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */
#endif

	/* Now map the lower 8 Meg into the ITLB. */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	li	r8, MI_PS8MEG | (2 << 5)	/* Set 8M byte page, APG 2 */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */

	lis	r8, MI_APG_INIT@h	/* Set protection modes */
	ori	r8, r8, MI_APG_INIT@l
	mtspr	SPRN_MI_AP, r8
	lis	r8, MD_APG_INIT@h
	ori	r8, r8, MD_APG_INIT@l
	mtspr	SPRN_MD_AP, r8

	/* Map a 512k page for the IMMR to get the processor
	 * internal registers (among other things).
	 */
#ifdef CONFIG_PIN_TLB_IMMR
	ori	r10, r10, 0x1c00
	mtspr	SPRN_MD_CTR, r10

	mfspr	r9, 638			/* Get current IMMR */
	andis.	r9, r9, 0xfff8		/* Get 512 kbytes boundary */

	lis	r8, VIRT_IMMR_BASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	/* Disable debug mode entry on breakpoints */
	mfspr	r8, SPRN_DER
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	rlwinm	r8, r8, 0, ~0xc
#else
	rlwinm	r8, r8, 0, ~0x8
#endif
	mtspr	SPRN_DER, r8
	blr


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
	.align	PAGE_SHIFT
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE table pointers, usually the kernel and current user
 * pointer to their respective root page table (pgdir).
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif

#ifdef CONFIG_PPC_8xx_PERF_EVENT
	.globl	itlb_miss_counter
itlb_miss_counter:
	.space	4

	.globl	dtlb_miss_counter
dtlb_miss_counter:
	.space	4

	.globl	instruction_counter
instruction_counter:
	.space	4
#endif