/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The primary CPU enters at __start.
 *   3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *      CPUs will enter as directed by "start-cpu" RTAS call, which is
 *      generic_secondary_smp_init, with PIR in r3.
 *   4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *      directed by the "start-cpu" RTAS call, with PIR in r3.
 * -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode.
 *   2. The primary CPU enters at 0 with device-tree in r3, OPAL base
 *      in r8, and entry in r9 for debugging purposes.
 *   3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
 *      is at generic_secondary_smp_init, with PIR in r3.
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

77 78 79 80 81 82 83 84
OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative from the start of fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b	__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign 8
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.8byte	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.8byte	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */

#ifdef CONFIG_RELOCATABLE_TEST
#define RUN_AT_LOAD_DEFAULT 1		/* Test relocation, do not copy to 0 */
#else
#define RUN_AT_LOAD_DEFAULT 0x72756e30  /* "run0" -- relocate to 0 by default */
#endif

	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load)
	.long	RUN_AT_LOAD_DEFAULT
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number */
	mr	r24,r3
	/* stash r4 for book3e */
	mr	r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
	sync

	li	r26,0
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r26,r26)
#endif
	/* All secondary cpus wait here until told to start. */
100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
	cmpdi	0,r12,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r12,r12)
#endif
	mtctr	r12
	mr	r3,r24
	/*
	 * it may be the case that other platforms have r4 right to
	 * begin with, this gives us some safety in case it is not
	 */
#ifdef CONFIG_PPC_BOOK3E
	mr	r4,r25
#else
	li	r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
	BUG_OPCODE
#endif
CLOSE_FIXED_SECTION(first_256B)
192 193 194 195 196

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265	/* "regshere" */
	.previous
198 199

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

212
#ifdef CONFIG_PPC_BOOK3E
/*
 * The booting_thread_hwid holds the thread id we want to boot in cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long  INVALID_THREAD_HWID
	.align 3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b	12f
11:
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6		/* thread enable set */
13:
	blr

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4		/* thread enable clear */
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr	r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis     r3,BUCSR_INIT@h
	ori     r3,r3,BUCSR_INIT@l
	mtspr   SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */

	mfspr	r3, SPRN_PIR
	cmpwi	r4,0
	bne	1f
	rlwimi	r3, r3, 30, 2, 30
	mtspr	SPRN_PIR, r3
1:
#endif

308 309 310 311
_GLOBAL(generic_secondary_thread_init)
	mr	r24,r3			/* r24 = physical cpu id */

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	bl	book3e_secondary_thread_init
#endif
	b	generic_secondary_common_init
324 325

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN
	mr	r24,r3
	mr	r25,r4

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	book3e_secondary_core_init

/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz     r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
	ld	r4, 0(r5)
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b
20:
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
#ifndef CONFIG_SMP
	b	kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointer	 */
	ld	r8,0(r8)		/* Get base vaddr of array	 */
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated 	 */
	li	r5,0			/* logical cpu id                */
1:
	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
	ldx	r13,r9,r8		/* r13 = paca_ptrs[cpu id]       */
	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist     */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	kexec_wait		/* next kernel might do better	 */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
	beq	3f
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)		/* ELFv1: dereference function descriptor */
#endif
	mtctr	r12
	bctrl

3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif /* SMP */
460

461 462 463 464
/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 * Clobbers r0, r3, r4; returns to caller via rfid.
 */
#ifdef CONFIG_PPC_BOOK3S
__mmu_off:
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr				/* already off -> plain return */
	mflr	r4
	andc	r3,r3,r0		/* clear IR and DR in the MSR image */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid				/* return to LR with MMU off */
	b	.	/* prevent speculative execution */
#endif
478 479 480 481 482 483 484 485 486 487 488 489 490


/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
	bl	enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

#ifdef CONFIG_PPC_BOOK3E
	bl	start_initialization_book3e
	b	__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
	bne	2f
1:	bl	__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	__mmu_off
	b	__after_prom_start
#endif /* CONFIG_PPC_BOOK3E */
542

543
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

581
__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
#if defined(CONFIG_PPC_BOOK3E)
	tophys(r26,r26)
#endif
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	bne	1f
	add	r25,r25,r26
1:	mr	r3,r25
	bl	relocate
#if defined(CONFIG_PPC_BOOK3E)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
#endif
	mr.	r4,r26			/* In some cases the loader may  */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r4,r4)
#endif
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_RELOCATABLE
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1
	bne	3f

#ifdef CONFIG_PPC_BOOK3E
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b	5f
3:
#endif
	/* # bytes of memory to copy */
	lis	r5,(ABS_ADDR(copy_to_here))@ha
	addi	r5,r5,(ABS_ADDR(copy_to_here))@l

	bl	copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	/* Jump to the copy of this code that we just made */
	addis	r8,r3,(ABS_ADDR(4f))@ha
	addi	r12,r8,(ABS_ADDR(4f))@l
	mtctr	r12
	bctr

.balign 8
p_end: .8byte _end - copy_to_here

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
	addis   r8,r26,(ABS_ADDR(p_end))@ha
	ld      r8,(ABS_ADDR(p_end))@l(r8)
	add	r5,r5,r8
5:	bl	copy_and_flush		/* copy the rest */

9:	b	start_here_multiplatform
674

675 676 677 678 679 680 681 682 683 684 685
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
686
4:	li	r0,8			/* Use the smallest common	*/
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
708
	isync
709 710 711 712 713 714 715 716 717 718 719 720 721 722
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	relative_toc
	tovirt(r2,r2)

	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca_ptrs)	/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca_ptrs array */
	sldi	r5,r24,3		/* get paca_ptrs[] index from cpu id */
	ldx	r13,r5,r4		/* r13 = paca_ptrs[cpu id]       */
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	       = stack pointer (real addr of temp stack)
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/*
	 * Do early setup for this CPU, in particular initialising the MMU so we
	 * can turn it on below. This is a call to C, which is OK, we're still
	 * running on the emergency stack.
	 */
	bl	early_setup_secondary

	/*
	 * The primary has initialized our kernel stack for us in the paca, grab
	 * it and put it in r1. We must *not* use it until we turn on the MMU
	 * below, because it may not be inside the RMO.
	 */
	ld	r1, PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r7,IRQS_DISABLED
	stb	r7,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
850 851 852 853 854 855 856 857 858
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
#endif

/*
 * Set the MSR up for 64-bit execution.
 * This subroutine clobbers r11 and r12
 */
enable_64b_mode:
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	li	r12,(MSR_64BIT | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr

880 881 882 883
/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We use tovirt() at the call
 * sites to handle this.
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
	mtlr	r0
	blr

.balign 8
p_toc:	.8byte	__toc_start + 0x8000 - 0b
903

904 905 906
/*
 * This is where the main kernel code starts.
 */
start_here_multiplatform:
	/* set up the TOC */
	bl      relative_toc
	tovirt(r2,r2)

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
	add	r1,r3,r1
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
	mtctr	r12
	bctrl		/* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */
981

982
	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	start_kernel

	/* Not reached */
	BUG_OPCODE
1004 1005 1006 1007 1008 1009

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"
/*
 * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
 * We will need to find a better way to fix this
 */
	.align	16

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)