/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>

/* The 601 only has IBATs; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:
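
/*
 * Note on LOAD_BAT: the upper words are written with 0 first so the
 * valid bits are clear while the pair is reprogrammed, then the new
 * upper/lower values are loaded.  The caller compares the PVR and
 * sets cr0.eq for a 601 before invoking this macro; in that case the
 * DBAT loads are skipped, since the 601 only has IBATs.
 */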

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons; the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return.  We also hit this trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected. */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_6xx */


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * We now have the first 16M of RAM mapped with the BATs.
 * PReP needs the MMU to be turned on here, but pmac already has it on.
 * This shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with the
 * MMU off, and in other cases, we now turn it off before changing BATs
 * above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */
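
/*
 * The mtspr SRR0/SRR1 + RFI sequence above is the standard idiom for
 * changing translation state on 6xx: rfi reloads the MSR from SRR1
 * and resumes at SRR0, so MSR_IR/MSR_DR and the branch to start_here
 * take effect atomically.
 */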

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
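
/*
 * Hold protocol: a secondary announces itself by storing its number
 * into __secondary_hold_acknowledge, then (on SMP) spins reading the
 * word at physical address 0 until the master stores that same cpu
 * number there, at which point it falls through to __secondary_start.
 */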

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG_SCRATCH0,r10;	\
	mtspr	SPRN_SPRG_SCRATCH1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG_THREAD;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
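
/*
 * EXCEPTION_PROLOG_1 picks the kernel stack for the frame: if we came
 * from kernel mode (MSR_PR clear in SRR1, so cr0.eq is set), r1 is
 * already a kernel stack pointer and is just converted with tophys();
 * if we came from user mode, the top of the task's kernel stack is
 * derived from SPRN_SPRG_THREAD (thread_struct -> thread_info +
 * THREAD_SIZE).  Either way r11 ends up as the physical address of
 * the new exception frame.
 */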


#define EXCEPTION_PROLOG_2	\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG_SCRATCH0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG_SCRATCH1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	lis	r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l; \
	stw	r10,8(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
	DO_KVM n;				\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
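
/*
 * Summary of the EXC_XFER_* variants: the _STD forms go through
 * transfer_to_handler_full/ret_from_except_full, while the _LITE
 * forms take the lighter transfer_to_handler/ret_from_except path and
 * record the trap number as n+1 (the odd value appears to mark frames
 * on which the non-volatile GPRs have not been saved).  The _EE forms
 * additionally use COPY_EE to copy MSR_EE from the interrupted MSR
 * (r9 = SRR1), so the handler runs with interrupts enabled only if
 * they were enabled when the exception occurred.
 */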

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG_RTAS
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG_RTAS
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	stw	r10,_DSISR(r11)
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
	mfspr	r4,SPRN_DAR
	EXC_XFER_LITE(0x300, handle_page_fault)


/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
	EXCEPTION_PROLOG
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r0,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
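	/*
	 * r1 now holds the low word of the hardware PTE: the rlwinm/rlwimi
	 * sequence above folded _PAGE_USER/_PAGE_RW/_PAGE_DIRTY into the PP
	 * bits (kernel: PP=0; user read-only: PP=3; user read-write: PP=2)
	 * and masked off the software-only PTE bits.  It is handed to the
	 * hardware through the RPA register and loaded with tlbli below.
	 */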
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess
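
/*
 * InstructionAddressInvalid synthesizes the state a real ISI would
 * have produced: a fault reason in SRR1 (plus DSISR/DAR, which
 * shouldn't strictly be needed for an instruction fault), then clears
 * MSR_TGPR to switch back from the shadow r0-r3 set and branches to
 * the normal InstructionAccess handler.
 */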

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r0,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
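	/*
	 * The MMU_FTR_NEED_DTLB_SW_LRU section above implements software
	 * way selection for the DTLB: SPRN_SPRG_603_LRU keeps one bit per
	 * TLB set (indexed by EA bits 15:19), the bit for this set is
	 * toggled on each miss, and the resulting way hint is written into
	 * SRR1 bit 14, where tlbld appears to pick it up as the way select.
	 */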
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe05		/* clear out reserved bits & PP lsb */
	andc	r1,r0,r1		/* PP = user? 2: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)

	.globl mol_trampoline
	.set mol_trampoline, i0x2f00

	. = 0x3000

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
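
/*
 * copy_and_flush copies one cache line (L1_CACHE_BYTES) per outer
 * iteration; after each line, dcbst pushes the freshly written data
 * to memory and icbi invalidates the corresponding icache line, so
 * the copied kernel image is safe to execute once the final
 * sync/isync have completed.
 */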

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	li	r3,0
	mtspr	SPRN_SPRG_RTAS,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * These generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
#if !defined(CONFIG_6xx)
_ENTRY(__save_cpu_setup)
	blr
_ENTRY(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) */


/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
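	/*
	 * The loop above initializes all 16 segment registers for context
	 * 0: each SR maps a 256MB segment, Ku is set, and the VSID is
	 * stepped by 0x111 per segment so consecutive segments get
	 * well-spread VSIDs.
	 */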
/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	li	r3,0
	mtspr	SPRN_SPRG_RTAS,r3	/* 0 => not in RTAS */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
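
/*
 * start_here thus makes two rfi transitions: the first (to 2: above)
 * dropped translation so that SDR1 and the segment registers could be
 * loaded safely, and the second turns the MMU back on with the final
 * kernel MSR and jumps to start_kernel.
 */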

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lwz	r4,MM_PGD(r4)
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
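/* A negative context id means no context has been allocated for this
   mm; that is a bug, so trap and emit a BUG entry. */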
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors.  However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr
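
/*
 * flush_tlbs issues one tlbie per 4k page over the first 4MB of
 * effective addresses; since the TLBs on these CPUs are indexed by
 * low-order EA bits, this should walk every congruence class and
 * flush the entire TLB without relying on tlbia.
 */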

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/*
 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
 * (we keep one for debugging) and on others, we use one 256M BAT.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT1U,r11
	mtspr	SPRN_IBAT1L,r8
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT2U,r11
	mtspr	SPRN_IBAT2L,r8
	isync
	blr

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
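
/*
 * Note on the encodings used above: on the 601 the valid bit lives in
 * the lower BAT word (0x7f = valid, 8MB block length), so three 8MB
 * IBAT pairs are used; on other 6xx parts the upper word carries the
 * block length and Vs/Vp valid bits (BL_256M<<2|0x2) and the lower
 * word carries the WIMG/PP bits (0x12 = read/write with M=1 for SMP).
 */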


#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8
	mtspr	SPRN_IBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8,	0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
       .globl  m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

	.globl intercept_table
intercept_table:
	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8