/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	.section	.text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

/* We try not to make any assumptions about how the boot loader
 * set up or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1.  If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */
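
/* Road map of the numbered steps below: (1) find the TLB1 entry we are
 * executing from and pin it, (2) invalidate every other entry, (3) hop
 * through a temporary mapping in the other address space, (4) clear the
 * PIDs, (5) invalidate the entry we booted in, (6) install the final
 * 64M KERNELBASE mapping in TLB1[0], (7) jump into it, and (8) tear
 * down the temporary mapping.
 */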

/* 1. Find the index of the entry we're executing in */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */
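	/* Throughout this file MAS0 is built the same way: 0x10000000
	 * selects TLB1 (TLBSEL) and ESEL lives in bits 4-15, which is
	 * why the rlwinm above rotates by 16 to recover the entry index.
	 */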

	mfspr	r7,SPRN_MAS1			/* Ensure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff
	li	r6,0				/* Set Entry counter to 0 */
1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv				/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r9				/* Are we done? */
	bne	1b				/* If not, repeat */
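
/* Roughly equivalent C for the loop above (a sketch, not built):
 *
 *	n = mfspr(SPRN_TLB1CFG) & 0xfff;	// number of TLB1 entries
 *	for (i = 0; i < n; i++)
 *		if (i != our_entry)		// our_entry = r3 from step 1
 *			tlb1[i].mas1 &= ~(MAS1_VALID | MAS1_IPROT);
 */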

	/* Invalidate TLB0 */
	li	r6,0x04
	tlbivax 0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	/* Invalidate TLB1 */
	li	r6,0x0c
	tlbivax 0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync
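	/* Note: the EA operand to tlbivax is a control word here rather
	 * than a real address: the 0x04 bit requests invalidate-all and
	 * the 0x08 bit selects TLB1, hence 0x04 (TLB0) and 0x0c (TLB1)
	 * above.  (Bit assignment as we read the e500 tlbivax encoding;
	 * treat it as a reading aid, not as normative.)
	 */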

/* 3. Setup a temp mapping and jump to it */
	andi.	r5, r3, 0x1	/* Find an unused entry that is non-zero */
	addi	r5, r5, 0x1
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre

	/* Just modify the entry ID, EPN and RPN for the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1		/* Set up temp mapping in the other address space */
	slwi	r6,r6,12
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	lis	r7,PHYSICAL_START@h
	rlwimi	r7,r6,0,20,31
	mtspr	SPRN_MAS2,r7
	mfspr	r6,SPRN_MAS3
	rlwimi	r7,r6,0,20,31
	mtspr	SPRN_MAS3,r7
	tlbwe

	xori	r6,r4,1
	slwi	r6,r6,5		/* setup new context with other address space */
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31
	addi	r7,r7,24
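	/* Label 1 is the mflr above, so r7 now holds its address within
	 * the temp mapping; +24 bytes (six instructions) is the address
	 * just past the rfi, i.e. step 4, reached in the other AS.
	 */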
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

/* 4. Clear out PIDs & Search info */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6

/* 5. Invalidate mapping we started in */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax 0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r6
	li	r7,0
	lis	r6,PAGE_OFFSET@h
	ori	r6,r6,PAGE_OFFSET@l
	rlwimi	r6,r7,0,20,31
	mtspr	SPRN_MAS2,r6
	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r7
	tlbwe
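	/* As programmed above, TLB1[0] now maps the 64M at PAGE_OFFSET
	 * to physical address 0 (the RPN is left zero), supervisor R/W/X.
	 */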

/* 7. Jump to KERNELBASE mapping */
	lis	r6,KERNELBASE@h
	ori	r6,r6,KERNELBASE@l
	rlwimi	r6,r7,0,20,31
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f			/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31
	addi	r6,r6,24
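	/* Same trick as step 3: +24 bytes lands just past the rfi, at
	 * step 8, now running from the TLB1[0] KERNELBASE mapping.
	 */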
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi				/* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax 0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, Debug);
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff
	lis	r4,num_tlbcam_entries@ha
	stw	r3,num_tlbcam_entries@l(r4)
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET	4
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define PTE_FLAGS_OFFSET	0
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
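
/* A rough C sketch of the 32-bit FIND_PTE case above (illustration only):
 *
 *	l1 = pgdir[ea >> 22];			// r11: one L1 entry per 4M
 *	if (!(l1 & 0xfffff000))			// label 2: is the bailout
 *		goto bail;
 *	pte_p = (l1 & 0xfffff000) + (((ea >> 12) & 0x3ff) << 2);
 *	pte = *(u32 *)pte_p;			// the Linux PTE (r11)
 */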

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on, so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11

	/*
	 * Check if it was a store fault, if not then bail
	 * because a user tried to access a kernel or
	 * read-protected page.  Otherwise, get the
	 * offending address and handle it.
	 */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_ST@h
	beq	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	0, r10, r11
	bge	2f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	FIND_PTE

	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
	bne	2f			/* Bail if not */

	/* Update 'changed'. */
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */

	/* MAS2 is not updated, as the entry already exists in the
	 * TLB; this fault was taken to detect a state transition
	 * (e.g. COW -> DIRTY).
	 */
	andi.	r11, r11, _PAGE_HWEXEC
	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
	ori	r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */

	/* update search PID in MAS6, AS = 0 */
	mfspr	r12, SPRN_PID0
	slwi	r12, r12, 16
	mtspr	SPRN_MAS6, r12

	/* find the TLB index that caused the fault.  It has to be here. */
	tlbsx	0, r10

	/* only update the perm bits, assume the RPN is fine */
	mfspr	r12, SPRN_MAS3
	rlwimi	r12, r11, 0, 20, 31
	mtspr	SPRN_MAS3,r12
	tlbwe

	/* Done...restore registers and get out of here.  */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi			/* Force context change */

2:
	/*
	 * The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as an FP Unavailable exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif

	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	FIND_PTE
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

#ifdef CONFIG_PTE_64BIT
	lwz	r13, 0(r12)
#endif
	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, PTE_FLAGS_OFFSET(r12)

	/* Jump to common TLB load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	FIND_PTE
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

#ifdef CONFIG_PTE_64BIT
	lwz	r13, 0(r12)
#endif
	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, PTE_FLAGS_OFFSET(r12)

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionStorage

#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	bne	load_up_spe
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
#else
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)

	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)


	/* Debug Interrupt */
	DEBUG_EXCEPTION

/*
 * Local functions
 */

	/*
	 * Data TLB exceptions will bail out to this point
	 * if they can't resolve the lightweight TLB fault.
	 */
data_access:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - TLB (info from Linux PTE)
 *	r12, r13 - available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here are assumed (as they properly should be) to already
	 * have the appropriate value.
	 */

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
	mtspr	SPRN_MAS2, r12

	bge	5, 1f

	/* is user addr */
	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	srwi	r10, r12, 1
	or	r12, r12, r10	/* Copy user perms into supervisor */
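	/* MAS3 keeps permissions as adjacent user/supervisor pairs
	 * (UX/SX, UW/SW, UR/SR), so shifting the user bits right by
	 * one mirrors them into the supervisor positions.
	 */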
	iseleq	r12, 0, r12
	b	2f

	/* is kernel addr */
1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
	ori	r12, r12, (MAS3_SX | MAS3_SR)

#ifdef CONFIG_PTE_64BIT
2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
	mtspr	SPRN_MAS3, r12
BEGIN_FTR_SECTION
	srwi	r10, r13, 8		/* grab RPN[8:31] */
	mtspr	SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
#else
2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
	/* Round-robin TLB1 entry assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

	tlbwe

	/* Done...restore registers and get out of here.  */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi					/* Force context change */

#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_spe:
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10	/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	/* restore registers and return */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi

/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
	.align	4,0

#endif /* CONFIG_SPE */

/*
 * Global functions
 */

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 */
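
/* Roughly equivalent C (a sketch; assumes each TLBCAM[] record is
 * 20 bytes with MAS0..MAS3 in its first four words, as the
 * mulli/lwz sequence below implies):
 *
 *	u32 *ent = (u32 *)((char *)TLBCAM + index * 20);
 *	mtspr(SPRN_MAS0, ent[0]);
 *	mtspr(SPRN_MAS1, ent[1]);
 *	mtspr(SPRN_MAS2, ent[2]);
 *	mtspr(SPRN_MAS3, ent[3]);
 *	asm volatile("tlbwe; isync");
 */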
_GLOBAL(loadcam_entry)
	lis	r4,TLBCAM@ha
	addi	r4,r4,TLBCAM@l
	mulli	r5,r3,20
	add	r3,r5,r4
	lwz	r4,0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,4(r3)
	mtspr	SPRN_MAS1,r4
	lwz	r4,8(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,12(r3)
	mtspr	SPRN_MAS3,r4
	tlbwe
	isync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Reserve 4k for the critical exception stack and 4k for the machine
 * check stack per CPU, for kernel-mode exceptions */
	.section .bss
	.align 12
exception_stack_bottom:
	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
	.globl	exception_stack_top
exception_stack_top:

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8