/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop

	/* Translate device tree address to physical, save in r30/r31 */
	bl	get_phys_addr
	mr	r30,r3
	mr	r31,r4

	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */

#ifdef CONFIG_RELOCATABLE
	LOAD_REG_ADDR_PIC(r3, _stext)	/* Get our current runtime base */

	/* Translate _stext address to physical, save in r23/r25 */
	bl	get_phys_addr
	mr	r23,r3
	mr	r25,r4

	/* PC-relative load of is_second_reloc: r8 = runtime address of 0b */
	bl	0f
0:	mflr	r8
	addis	r3,r8,(is_second_reloc - 0b)@ha
	lwz	r19,(is_second_reloc - 0b)@l(r3)

	/* Check if this is the second relocation. */
	cmpwi	r19,1
	bne	1f

	/*
	 * For the second relocation, we already get the real memstart_addr
	 * from device tree. So we will map PAGE_OFFSET to memstart_addr,
	 * then the virtual address of start kernel should be:
	 *          PAGE_OFFSET + (kernstart_addr - memstart_addr)
	 * Since the offset between kernstart_addr and memstart_addr should
	 * never be beyond 1G, so we can just use the lower 32bit of them
	 * for the calculation.
	 */
	lis	r3,PAGE_OFFSET@h

	addis	r4,r8,(kernstart_addr - 0b)@ha
	addi	r4,r4,(kernstart_addr - 0b)@l
	lwz	r5,4(r4)

	addis	r6,r8,(memstart_addr - 0b)@ha
	addi	r6,r6,(memstart_addr - 0b)@l
	lwz	r7,4(r6)

	subf	r5,r7,r5
	add	r3,r3,r5
	b	2f

1:
	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our shift of offset from a 64M page.
	 * We could map the 64M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r25,0,0x3ffffff		/* r6 = PHYS_START % 64M */
	rlwinm	r5,r4,0,0x3ffffff		/* r5 = KERNELBASE % 64M */
	subf	r3,r5,r6			/* r3 = r6 - r5 */
	add	r3,r4,r3			/* Required Virtual Address */

2:	bl	relocate

	/*
	 * For the second relocation, we already set the right tlb entries
	 * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
	*/
	cmpwi	r19,1
	beq	set_ivor
#endif

/* We try to not make any assumptions about how the boot loader
 * setup or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1.  If not an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 were executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */
_ENTRY(__early_start)

/* Pull in the shared boot-time TLB setup; the define selects the
 * boot-setup (as opposed to secondary-CPU) variant of that code. */
#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP

set_ivor:
	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

#ifdef CONFIG_SMP
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so
	 */
	LOAD_REG_ADDR_PIC(r24, boot_cpuid)
	lwz	r24, 0(r24)
	cmpwi	r24, -1
	mfspr   r24,SPRN_PIR
	bne	__secondary_start
#endif

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	CURRENT_THREAD_INFO(r22, r1)
	stw	r24, TI_CPU(r22)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/* Pass device tree (r30/r31) and phys kernel start to relocate_init */
	mr	r3,r30
	mr	r4,r31
#ifdef CONFIG_PHYS_64BIT
	mr	r5,r23
	mr	r6,r25
#else
	mr	r5,r25
#endif
	bl	relocate_init
#endif

#ifdef CONFIG_DYNAMIC_MEMSTART
	/* Record the physical kernel start in kernstart_addr */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	stw	r23,0(r3)
	stw	r25,4(r3)
#else
	stw	r25,0(r3)
#endif
#endif

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r30
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 *   r10 is the pshift from the PGD, if we're a hugepage
 */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_HUGETLB_PAGE
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	blt	1000f;			/* Normal non-huge page */	\
	beq	2f;			/* Bail if no table */		\
	oris	r11, r11, PD_HUGE@h;	/* Put back address bit */	\
	andi.	r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */	\
	xor	r12, r10, r11;		/* drop size bits from pointer */ \
	b	1001f;							\
1000:	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	li	r10, 0;			/* clear r10 */			\
1001:	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#endif /* HUGEPAGE */
#else /* !PTE_64BIT */
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_LITE(0x0300, handle_page_fault)
1:
	/* Cache-locking fault (ILK/DLK set in ESR) */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif
#endif

	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG(SYSCALL)
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
		  unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

4:
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 *       place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
#endif
	rlwimi	r13,r12,11,29,29	/* copy ESR:ST into _PAGE_RW bit */

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage
527 528 529 530 531 532 533 534

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
535
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
536 537
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
S
Scott Wood 已提交
538 539 540 541 542
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
543 544 545 546
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
S
Scott Wood 已提交
547
	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
548 549 550 551 552
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
553
	lis	r11, PAGE_OFFSET@h
554 555 556 557 558 559 560 561 562
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

563 564 565 566 567 568 569
	/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
570 571 572 573
	b	4f

	/* Get the PGD for the current thread */
3:
574
	mfspr	r11,SPRN_SPRG_THREAD
575 576
	lwz	r11,PGDIR(r11)

577
	/* Make up the required permissions for user code */
578
#ifdef CONFIG_PTE_64BIT
579
	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
580 581
	oris	r13,r13,_PAGE_ACCESSED@h
#else
582
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
583
#endif
584

585
4:
586
	FIND_PTE
587
	andc.	r13,r13,r11		/* Check permission */
588 589 590

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
B
Becky Bruce 已提交
591 592
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
593 594 595 596 597
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

598
	bne	2f			/* Bail if permission mismach */
599 600 601 602 603 604 605 606

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
607 608
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
609
	mtcr	r11
610 611 612
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
613
	mfspr	r10, SPRN_SPRG_RSCRATCH0
614 615
	b	InstructionStorage

/* Define SPE handlers for e200 and e500v2 */
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
	beq	1f
	bl	load_up_spe
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#elif defined(CONFIG_SPE_POSSIBLE)
	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE_POSSIBLE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
		  SPEFloatingPointException, EXC_XFER_EE)

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  SPEFloatingPointRoundException, EXC_XFER_EE)
#elif defined(CONFIG_SPE_POSSIBLE)
	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
		  unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE_POSSIBLE */

	/* Performance Monitor */
	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
		  performance_monitor_exception, EXC_XFER_STD)

	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)

	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
			   CriticalDoorbell, unknown_exception)

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
	DEBUG_CRIT_EXCEPTION

	GUEST_DOORBELL_EXCEPTION

	CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
			   unknown_exception)

	/* Hypercall */
	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)

	/* Embedded Hypervisor Privilege */
	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)

interrupt_end:

/*
 * Local functions
 */

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - tsize encoding (if HUGETLB_PAGE) or available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
#ifdef CONFIG_HUGETLB_PAGE
	cmpwi	6, r10, 0			/* check for huge page */
	beq	6, finish_tlb_load_cont    	/* !huge */

	/* Alas, we need more scratch registers for hugepages */
	mfspr	r12, SPRN_SPRG_THREAD
	stw	r14, THREAD_NORMSAVE(4)(r12)
	stw	r15, THREAD_NORMSAVE(5)(r12)
	stw	r16, THREAD_NORMSAVE(6)(r12)
	stw	r17, THREAD_NORMSAVE(7)(r12)

	/* Get the next_tlbcam_idx percpu var */
#ifdef CONFIG_SMP
	lwz	r12, THREAD_INFO-THREAD(r12)
	lwz	r15, TI_CPU(r12)
	lis     r14, __per_cpu_offset@h
	ori     r14, r14, __per_cpu_offset@l
	rlwinm  r15, r15, 2, 0, 29
	lwzx    r16, r14, r15
#else
	li	r16, 0
#endif
	lis     r17, next_tlbcam_idx@h
	ori	r17, r17, next_tlbcam_idx@l
	add	r17, r17, r16			/* r17 = *next_tlbcam_idx */
	lwz     r15, 0(r17)			/* r15 = next_tlbcam_idx */

	lis	r14, MAS0_TLBSEL(1)@h		/* select TLB1 (TLBCAM) */
	rlwimi	r14, r15, 16, 4, 15		/* next_tlbcam_idx entry */
	mtspr	SPRN_MAS0, r14

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r16, SPRN_TLB1CFG
	andi.	r16, r16, 0xfff

	/* Update next_tlbcam_idx, wrapping when necessary */
	addi	r15, r15, 1
	cmpw	r15, r16
	blt 	100f
	lis	r14, tlbcam_index@h
	ori	r14, r14, tlbcam_index@l
	lwz	r15, 0(r14)
100:	stw	r15, 0(r17)

	/*
	 * Calc MAS1_TSIZE from r10 (which has pshift encoded)
	 * tlb_enc = (pshift - 10).
	 */
	subi	r15, r10, 10
	mfspr	r16, SPRN_MAS1
	rlwimi	r16, r15, 7, 20, 24
	mtspr	SPRN_MAS1, r16

	/* copy the pshift for use later */
	mr	r14, r10

	/* fall through */

#endif /* CONFIG_HUGETLB_PAGE */

	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
finish_tlb_load_cont:
#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
	andi.	r10, r11, _PAGE_DIRTY
	bne	1f
	li	r10, MAS3_SW | MAS3_UW
	andc	r12, r12, r10		/* clean page: drop write perms */
1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */
2:	mtspr	SPRN_MAS3, r12
BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 12		/* grab RPN[12:31] */
	mtspr	SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
	mr	r13, r11
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	and	r12, r11, r10
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	slwi	r10, r12, 1
	or	r10, r10, r12
	iseleq	r12, r12, r10
	rlwimi	r13, r12, 0, 20, 31	/* Get RPN from PTE, merge w/ perms */
	mtspr	SPRN_MAS3, r13
#endif

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 3f			/* don't mask if page isn't huge */
	li	r13, 1
	slw	r13, r13, r14
	subi	r13, r13, 1
	rlwinm	r13, r13, 0, 0, 19	/* bottom bits used for WIMGE/etc */
	andc	r12, r12, r13		/* mask off ea bits within the page */
#endif
3:	mtspr	SPRN_MAS2, r12

#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

tlb_write_entry:
	tlbwe

	/* Done...restore registers and get out of here.  */
	mfspr	r10, SPRN_SPRG_THREAD
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 8f /* skip restore for 4k page faults */
	lwz	r14, THREAD_NORMSAVE(4)(r10)
	lwz	r15, THREAD_NORMSAVE(5)(r10)
	lwz	r16, THREAD_NORMSAVE(6)(r10)
	lwz	r17, THREAD_NORMSAVE(7)(r10)
#endif
8:	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */

#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
_GLOBAL(load_up_spe)
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5		/* reload the accumulator */
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5,THREAD_EVR0)
	blr

/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
#ifdef CONFIG_PRINTK
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
#endif
	b	ret_from_except
#ifdef CONFIG_PRINTK
87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
#endif
	.align	4,0

#endif /* CONFIG_SPE */

/*
 * Translate the effective addr in r3 to phys addr. The phys addr will be put
 * into r3(higher 32bit) and r4(lower 32bit)
 */
get_phys_addr:
	mfmsr	r8
	mfspr	r9,SPRN_PID
	rlwinm	r9,r9,16,0x3fff0000	/* turn PID into MAS6[SPID] */
	rlwimi	r9,r8,28,0x00000001	/* turn MSR[DS] into MAS6[SAS] */
	mtspr	SPRN_MAS6,r9

	tlbsx	0,r3			/* must succeed */

	mfspr	r8,SPRN_MAS1
	mfspr	r12,SPRN_MAS3
	rlwinm	r9,r8,25,0x1f		/* r9 = log2(page size) */
	li	r10,1024
	slw	r10,r10,r9		/* r10 = page size */
	addi	r10,r10,-1
	and	r11,r3,r10		/* r11 = page offset */
	andc	r4,r12,r10		/* r4 = page base */
	or	r4,r4,r11		/* r4 = devtree phys addr */
#ifdef CONFIG_PHYS_64BIT
	mfspr	r3,SPRN_MAS7
#endif
	blr
/*
 * Global functions
 */

#ifdef CONFIG_E200
/* Adjust or setup IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
	li	r3,DebugDebug@l
	mtspr	SPRN_IVOR15,r3
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	sync
	blr
#endif
#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
	li	r3,DebugCrit@l
	mtspr	SPRN_IVOR15,r3
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	sync
	blr
#else
/* Adjust or setup IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
	li	r3,DebugDebug@l
	mtspr	SPRN_IVOR15,r3
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	li	r3,Doorbell@l
	mtspr	SPRN_IVOR36,r3
	li	r3,CriticalDoorbell@l
	mtspr	SPRN_IVOR37,r3
	sync
	blr

/* Setup IVORs for the embedded-hypervisor exceptions */
_GLOBAL(__setup_ehv_ivors)
	li	r3,GuestDoorbell@l
	mtspr	SPRN_IVOR38,r3
	li	r3,CriticalGuestDoorbell@l
	mtspr	SPRN_IVOR39,r3
	li	r3,Hypercall@l
	mtspr	SPRN_IVOR40,r3
	li	r3,Ehvpriv@l
	mtspr	SPRN_IVOR41,r3
	sync
	blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 * Save prev's SPE state into its thread_struct and disable SPE
 * in prev's saved MSR, leaving SPE enabled in the kernel.
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
	blr
#endif /* CONFIG_SPE */

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * flush_dcache_L1()
 *
 * Flush the entire L1 d-cache by touching, then dcbf-flushing, a
 * cache-sized window of kernel memory starting at KERNELBASE.  The
 * cache geometry is read from L1CFG0; HID0[DCFA] (data cache flush
 * assist) is set for the duration and restored afterwards.
 * Clobbers r3-r9, CTR.
 */
_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = total number of loads/flushes */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5	/* step one cache block */
	bdnz	1b

	msync
	lis	r4,KERNELBASE@h	/* rewind and walk the window again */
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr

/* Flush L1 d-cache, invalidate and disable d-cache and i-cache.
 * Clobbers r4, r5, r10 and LR (via the call to flush_dcache_L1).
 */
_GLOBAL(__flush_disable_L1)
	mflr	r10
	bl	flush_dcache_L1	/* Flush L1 d-cache */
	mtlr	r10

	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3	/* insert 0b10: set flash-invalidate,
				 * clear the enable bit */

	msync
	isync
	mtspr	SPRN_L1CSR0, r4
	isync

1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
	andi.	r4, r4, 2	/* invalidate bit self-clears when done */
	bne	1b

	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3	/* same 0b10 insert as for L1CSR0 above */

	mtspr	SPRN_L1CSR1, r4
	isync

	blr

#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
	.globl __secondary_start
__secondary_start:
	/* Replay the boot CPU's TLB CAM setup on this secondary:
	 * tlbcam_index entries are reloaded below. */
	LOAD_REG_ADDR_PIC(r3, tlbcam_index)
	lwz	r3,0(r3)
	mtctr	r3
	li	r26,0		/* r26 safe? */
	bl	switch_to_as1	/* run from AS1 while rewriting AS0 TLBs */
	mr	r27,r3		/* tlb entry */
	/* Load each CAM entry */
1:	mr	r3,r26
	bl	loadcam_entry
	addi	r26,r26,1
	bdnz	1b
	mr	r3,r27		/* tlb entry to invalidate in restore_to_as0 */
	LOAD_REG_ADDR_PIC(r4, memstart_addr)
	lwz	r4,0(r4)
	mr	r5,r25		/* phys kernel start */
	rlwinm	r5,r5,0,~0x3ffffff	/* aligned 64M */
	subf	r4,r5,r4	/* memstart_addr - phys kernel start */
	li	r5,0		/* no device tree */
	li	r6,0		/* not boot cpu */
	bl	restore_to_as0

	/* Tell the boot CPU we are up by publishing our CPU number */
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	stw	r24,0(r3)

	li	r3,0
	mr	r4,r24		/* Why? */
	bl	call_setup_cpu

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)	/* terminate the stack back-chain */

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS4,r4

	/* Jump to start_secondary via rfi so MSR is set atomically */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	sync
	rfi
	sync

	.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
#endif

/*
 * Create a tlb entry with the same effective and physical address as
 * the tlb entry used by the current running code. But set the TS to 1.
 * Then switch to the address space 1. It will return with the r3 set to
 * the ESEL of the new created tlb.
 */
_GLOBAL(switch_to_as1)
	mflr	r5			/* remember return address for rfi */

	/* Find a entry not used */
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff		/* r3 = number of TLB1 entries */
	mfspr	r4,SPRN_PID
	rlwinm	r4,r4,16,0x3fff0000	/* turn PID into MAS6[SPID] */
	mtspr	SPRN_MAS6,r4
1:	lis	r4,0x1000		/* Set MAS0(TLBSEL) = 1 */
	addi	r3,r3,-1		/* scan from the top entry down */
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r4,SPRN_MAS1
	andis.	r4,r4,MAS1_VALID@h	/* keep looking while entries valid */
	bne	1b

	/* Get the tlb entry used by the current running code */
	bl	0f
0:	mflr	r4
	tlbsx	0,r4			/* search for the entry mapping PC */

	mfspr	r4,SPRN_MAS1
	ori	r4,r4,MAS1_TS		/* Set the TS = 1 */
	mtspr	SPRN_MAS1,r4

	mfspr	r4,SPRN_MAS0
	rlwinm	r4,r4,0,~MAS0_ESEL_MASK
	rlwimi	r4,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r4
	tlbwe				/* write AS1 copy into the free slot */
	isync
	sync

	mfmsr	r4
	ori	r4,r4,MSR_IS | MSR_DS	/* run in AS1 after the rfi */
	mtspr	SPRN_SRR0,r5		/* return to caller, now in AS1 */
	mtspr	SPRN_SRR1,r4
	sync
	rfi

/*
 * Restore to the address space 0 and also invalidate the tlb entry created
 * by switch_to_as1.
 * r3 - the tlb entry which should be invalidated
 * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
 * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
 * r6 - boot cpu
*/
_GLOBAL(restore_to_as0)
	mflr	r0

	bl	0f			/* get current PC into r9 */
0:	mflr	r9
	addi	r9,r9,1f - 0b		/* r9 = AS0 address of label 1 */

	/*
	 * We may map the PAGE_OFFSET in AS0 to a different physical address,
	 * so we need calculate the right jump and device tree address based
	 * on the offset passed by r4.
	 */
	add	r9,r9,r4
	add	r5,r5,r4
	add	r0,r0,r4		/* fix up the saved return address too */

2:	mfmsr	r7
	li	r8,(MSR_IS | MSR_DS)
	andc	r7,r7,r8		/* clear IS/DS: back to AS0 */

	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r7
	sync
	rfi				/* resume at 1: in address space 0 */

	/* Invalidate the temporary tlb entry for AS1 */
1:	lis	r9,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r9,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r9
	tlbre
	mfspr	r9,SPRN_MAS1
	rlwinm	r9,r9,0,2,31		/* Clear MAS1 Valid and IPPROT */
	mtspr	SPRN_MAS1,r9
	tlbwe
	isync

	cmpwi	r4,0			/* eq = (offset == 0) || !boot cpu */
	cmpwi	cr1,r6,0
	cror	eq,4*cr1+eq,eq
	bne	3f			/* offset != 0 && is_boot_cpu */
	mtlr	r0
	blr

	/*
	 * The PAGE_OFFSET will map to a different physical address,
	 * jump to _start to do another relocation again.
	*/
3:	mr	r3,r5			/* pass device tree address */
	bl	_start

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12		/* 2^12 = page alignment */
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096		/* one zero-filled page */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE	/* kernel's initial page directory */

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8