/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 * -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode, primary CPU enters at 0
 *      with device-tree in gpr3. We also get OPAL base in r8 and
 *	entry in r9 for debugging purposes
 *   2. Secondary processors enter at 0x60 with PIR in gpr3
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

	.text
	.globl  _stext
_stext:
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

77 78 79 80 81
	/* Secondary processors spin on this value until it becomes nonzero.
	 * When it does it contains the real address of the descriptor
	 * of the function that the cpu should jump to to continue
	 * initialization.
	 */
82 83 84 85 86 87 88 89 90 91
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */
	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
#endif

106 107
	. = 0x60
/*
108 109
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
110 111 112
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
113 114
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
115
 */
116 117
	.globl	__secondary_hold
__secondary_hold:
118
#ifndef CONFIG_PPC_BOOK3E
119 120 121
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
122
#endif
123
	/* Grab our physical cpu number */
124 125 126 127 128
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
129
	std	r24,__secondary_hold_acknowledge-_stext(0)
130 131 132
	sync

	/* All secondary cpus wait here until told to start. */
133
100:	ld	r4,__secondary_hold_spinloop-_stext(0)
134 135
	cmpdi	0,r4,0
	beq	100b
136

137
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
138
	ld	r4,0(r4)		/* deref function descriptor */
139
	mtctr	r4
140
	mr	r3,r24
141
	li	r4,0
142 143
	/* Make sure that patched code is visible */
	isync
144
	bctr
145 146 147 148 149 150 151 152 153 154 155
#else
	BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265	/* ASCII "regshere" */
	.text

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#endif

/* Secondary thread entry: r3 = this thread's physical cpu id */
_GLOBAL(generic_secondary_thread_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	.relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	bl	.book3e_secondary_thread_init
#endif
	b	generic_secondary_common_init

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3			/* save physical cpu id */
	mr	r25,r4			/* save Book3E TLB-entry flag */

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	.relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	.book3e_secondary_core_init
#endif
generic_secondary_common_init:
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		 */
	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
#ifndef CONFIG_SMP
	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
	b	.kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated 	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist     */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0
	beq	3f			/* no restore handler registered */
	ld	r23,0(r23)		/* deref function descriptor */
	mtctr	r23
	bctrl

3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b			/* retry if reservation lost */
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr				/* translation already off: done */
	mflr	r4
	andc	r3,r3,r0		/* clear IR/DR in the new MSR */
	mtspr	SPRN_SRR0,r4		/* resume at our return address */
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */
#endif


/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	.relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	.__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

#ifdef CONFIG_PPC_BOOK3E
	bl	.start_initialization_book3e
	b	.__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	.__mmu_off
	b	.__after_prom_start
#endif /* CONFIG_PPC_BOOK3E */

_INIT_STATIC(__boot_from_prom)
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	.relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	.prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	bne	1f
	add	r25,r25,r26
1:	mr	r3,r25
	bl	.relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r3,r3)			/* on booke, we already run at PAGE_OFFSET */
#endif
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r6,r6)			/* on booke, we already run at PAGE_OFFSET */
#endif

#ifdef CONFIG_RELOCATABLE
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1
	bne	3f

	/* just copy interrupts */
	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
	addi	r8,r8,(4f - _stext)@l	/* that we just made */
	mtctr	r8
	bctr

p_end:	.llong	_end - _stext

4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
5:	bl	.copy_and_flush		/* copy the rest */

9:	b	.start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	.relative_toc
	tovirt(r2,r2)

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	       = stack pointer (real addr of temp stack)
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
582 583 584
	.section ".text";
	.align 2 ;

585
	.globl	__secondary_start
586
__secondary_start:
587 588
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM
589

590
	/* Initialize the kernel stack */
591
	LOAD_REG_ADDR(r3, current_set)
592
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
593 594 595
	ldx	r14,r3,r28
	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r14,PACAKSAVE(r13)
596

597 598 599
	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

600 601 602 603 604 605
	/*
	 * setup the new stack pointer, but *don't* use this until
	 * translation is on.
	 */
	mr	r1, r14

606
	/* Clear backchain so we get nice backtraces */
607 608 609
	li	r7,0
	mtlr	r7

610 611 612
	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
613
	stb	r7,PACASOFTIRQEN(r13)
614 615
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)
616

617
	/* enable MMU and jump to start_secondary */
618 619
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
620

621 622
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
623
	RFI
624 625 626 627
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif

/*
 * Switch the CPU into 64-bit mode.
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	li	r12,(MSR_64BIT | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We use tovirt() at the call
 * sites to handle this.
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
	mtlr	r0
	blr

p_toc:	.llong	__toc_start + 0x8000 - 0b

/*
 * This is where the main kernel code starts.
 */
_INIT_STATIC(start_here_multiplatform)
	/* set up the TOC */
	bl      .relative_toc
	tovirt(r2,r2)

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup		/* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, .start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */
	
	/* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common)
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	/* Do more system initializations in virtual mode */
	bl	.setup_system

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	.start_kernel

	/* Not reached */
	BUG_OPCODE
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE