/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/exception.h>
#include <asm/irqflags.h>

/* The physical memory is layed out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

	.text
	.globl  _stext
_stext:
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes nonzero.
	 * When it does it contains the real address of the descriptor
	 * of the function that the cpu should jump to to continue
	 * initialization.
	 */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_CRASH_DUMP
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */
	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
#endif

113 114
	. = 0x60
/*
115 116
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
117 118 119
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
120 121
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
122
 */
123 124
	.globl	__secondary_hold
__secondary_hold:
125 126 127 128
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

129
	/* Grab our physical cpu number */
130 131 132 133 134
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
135
	std	r24,__secondary_hold_acknowledge-_stext(0)
136 137 138
	sync

	/* All secondary cpus wait here until told to start. */
139
100:	ld	r4,__secondary_hold_spinloop-_stext(0)
140 141
	cmpdi	0,r4,0
	beq	100b
142

143
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
144
	ld	r4,0(r4)		/* deref function descriptor */
145
	mtctr	r4
146
	mr	r3,r24
147
	bctr
148 149 150 151 152 153 154 155 156 157 158
#else
	BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#endif

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get the TOC pointer (real address) */
	bl	.relative_toc

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */

#ifndef CONFIG_SMP
	b	3b			/* Never go on non-SMP		 */
#else
	cmpwi	0,r23,0
	beq	3b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0
	beq	4f
	ld	r23,0(r23)
	mtctr	r23
	bctrl

4:	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	mflr	r4
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	.relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	.__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	.__mmu_off
	b	.__after_prom_start

_INIT_STATIC(__boot_from_prom)
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	.relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	.prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#ifdef CONFIG_CRASH_DUMP
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
	bne	1f
	add	r25,r25,r26
#endif
1:	mr	r3,r25
	bl	.relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_CRASH_DUMP
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1
	bne	3f

	li	r5,__end_interrupts - _stext	/* just copy interrupts */
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
	addi	r8,r8,(4f - _stext)@l	/* that we just made */
	mtctr	r8
	bctr

p_end:	.llong	_end - _stext

4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
5:	bl	.copy_and_flush		/* copy the rest */

9:	b	.start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	.relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ori	r4,r4,MSR_EE
	li	r8,1
	stb	r8,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	stb	r7,PACASOFTIRQEN(r13)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,(MSR_SF | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r9
	ld	r2,(p_toc - 0b)(r9)
	add	r2,r2,r9
	mtlr	r0
	blr

p_toc:	.llong	__toc_start + 0x8000 - 0b

/*
 * This is where the main kernel code starts.
 */
_INIT_STATIC(start_here_multiplatform)
	/* set up the TOC (real address) */
	bl	.relative_toc

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup		/* also sets r13 and SPRG3 */

	LOAD_REG_ADDR(r3, .start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
	
	/* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common)
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
	li	r5,0
	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
	mtmsrd	r5
	li	r5,1
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */

	bl	.start_kernel

	/* Not reached */
	BUG_OPCODE

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE