/* arch/powerpc/kernel/head_64.S */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
15 16 17
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
18 19 20 21 22 23 24 25
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 * -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode, primary CPU enters at 0
 *      with device-tree in gpr3. We also get OPAL base in r8 and
 *	entry in r9 for debugging purposes
 *   2. Secondary processors enter at 0x60 with PIR in gpr3
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

	.text
	.globl  _stext
_stext:
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
74
	FIXUP_ENDIAN
75
	b	__start_initialization_multiplatform
76 77 78 79 80
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

81 82 83
	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
84
	 */
85
	.balign 8
86 87 88 89 90 91 92 93 94 95
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

96
#ifdef CONFIG_RELOCATABLE
97 98 99 100 101 102 103 104 105 106 107 108 109
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */
	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
#endif

110 111
	. = 0x60
/*
112 113
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
114 115 116
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
117 118
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
119
 */
120 121
	.globl	__secondary_hold
__secondary_hold:
122
	FIXUP_ENDIAN
123
#ifndef CONFIG_PPC_BOOK3E
124 125 126
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
127
#endif
128
	/* Grab our physical cpu number */
129
	mr	r24,r3
130 131
	/* stash r4 for book3e */
	mr	r25,r4
132 133 134 135

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
136
	std	r24,__secondary_hold_acknowledge-_stext(0)
137 138
	sync

139 140 141 142
	li	r26,0
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r26,r26)
#endif
143
	/* All secondary cpus wait here until told to start. */
144 145
100:	ld	r12,__secondary_hold_spinloop-_stext(r26)
	cmpdi	0,r12,0
146
	beq	100b
147

148
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
149
#ifdef CONFIG_PPC_BOOK3E
150 151 152
	tovirt(r12,r12)
#endif
	mtctr	r12
153
	mr	r3,r24
154 155 156 157 158 159 160
	/*
	 * it may be the case that other platforms have r4 right to
	 * begin with, this gives us some safety in case it is not
	 */
#ifdef CONFIG_PPC_BOOK3E
	mr	r4,r25
#else
161
	li	r4,0
162
#endif
163 164
	/* Make sure that patched code is visible */
	isync
165
	bctr
166 167 168 169 170 171 172 173 174 175 176
#else
	BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
177 178 179
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
180
 */
181 182
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
183
#endif
184

185
#ifdef CONFIG_PPC_BOOK3E
186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
/*
 * The booting_thread_hwid holds the thread id we want to boot in cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long  INVALID_THREAD_HWID
	.align 3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpi	0, r3, 0
	beq	10f
	cmpi	0, r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
211 212
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
213 214
	b	12f
11:
215 216
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
217 218 219 220 221 222 223 224
12:
	isync
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6
13:
	blr

C
chenhui zhao 已提交
225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243
/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpi	0, r3, 0
	beq	10f
	cmpi	0, r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4
13:
	blr

244
_GLOBAL(fsl_secondary_thread_init)
245 246
	mfspr	r4,SPRN_BUCSR

247 248 249 250 251 252 253 254 255 256 257 258 259 260
	/* Enable branch prediction */
	lis     r3,BUCSR_INIT@h
	ori     r3,r3,BUCSR_INIT@l
	mtspr   SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
261 262 263 264 265 266 267 268 269 270
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
271
	 */
272

273
	mfspr	r3, SPRN_PIR
274 275
	cmpwi	r4,0
	bne	1f
276 277
	rlwimi	r3, r3, 30, 2, 30
	mtspr	SPRN_PIR, r3
278
1:
279 280
#endif

281 282 283 284
_GLOBAL(generic_secondary_thread_init)
	mr	r24,r3

	/* turn on 64-bit mode */
285
	bl	enable_64b_mode
286 287

	/* get a valid TOC pointer, wherever we're mapped at */
288
	bl	relative_toc
289
	tovirt(r2,r2)
290 291 292 293

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
294
	bl	book3e_secondary_thread_init
295 296
#endif
	b	generic_secondary_common_init
297 298

/*
O
Olof Johansson 已提交
299 300
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
301
 * At entry, r3 = this processor's number (physical cpu id)
302 303 304 305
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
306
 */
O
Olof Johansson 已提交
307
_GLOBAL(generic_secondary_smp_init)
308
	FIXUP_ENDIAN
309
	mr	r24,r3
310 311
	mr	r25,r4

312
	/* turn on 64-bit mode */
313
	bl	enable_64b_mode
314

315
	/* get a valid TOC pointer, wherever we're mapped at */
316
	bl	relative_toc
317
	tovirt(r2,r2)
318

319 320 321 322
#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
323
	bl	book3e_secondary_core_init
324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361

/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz     r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
	ld	r4, 0(r5)
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b
20:
362 363 364
#endif

generic_secondary_common_init:
365 366 367 368
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
369 370
	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		 */
	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
371 372
#ifndef CONFIG_SMP
	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
373
	b	kexec_wait		/* wait for next kernel if !SMP	 */
374 375 376
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated 	 */
377 378 379 380 381 382
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
383
	cmpw	r5,r7			/* Check if more pacas exist     */
384 385 386
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
387
	b	kexec_wait		/* next kernel might do better	 */
388

389
2:	SET_PACA(r13)
390 391 392 393 394
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

395 396
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
397

O
Olof Johansson 已提交
398
	/* See if we need to call a cpu state restore handler */
399
	LOAD_REG_ADDR(r23, cur_cpu_spec)
O
Olof Johansson 已提交
400
	ld	r23,0(r23)
401 402
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
403
	beq	3f
404
#ifdef PPC64_ELF_ABI_v1
405 406
	ld	r12,0(r12)
#endif
407
	mtctr	r12
O
Olof Johansson 已提交
408 409
	bctrl

410
3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
411 412 413 414 415 416 417
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
418 419 420
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
421
	beq	4b			/* Loop until told to go	 */
422 423

	sync				/* order paca.run and cur_cpu_spec */
424
	isync				/* In case code patching happened */
425

426
	/* Create a temp kernel stack for use before relocation is on.	*/
427 428 429
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

430
	b	__secondary_start
431
#endif /* SMP */
432

433 434 435 436
/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
437
#ifdef CONFIG_PPC_BOOK3S
438
__mmu_off:
439 440 441
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
442
	mflr	r4
443 444 445 446 447 448
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */
449
#endif
450 451 452 453 454 455 456 457 458 459 460 461 462


/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *   
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
463
__start_initialization_multiplatform:
464
	/* Make sure we are running in 64 bits mode */
465
	bl	enable_64b_mode
466 467

	/* Get TOC pointer (current runtime address) */
468
	bl	relative_toc
469 470 471 472 473 474 475

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

476 477 478 479
	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
480
	beq	1f
481
	b	__boot_from_prom		/* yes -> prom */
482
1:
483 484 485
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
486 487 488 489 490
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif
491

492
#ifdef CONFIG_PPC_BOOK3E
493 494
	bl	start_initialization_book3e
	b	__after_prom_start
495
#else
496
	/* Setup some critical 970 SPRs before switching MMU off */
O
Olof Johansson 已提交
497 498 499 500 501 502 503
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
504 505
	beq	1f
	cmpwi	r0,0x45		/* 970GX */
O
Olof Johansson 已提交
506
	bne	2f
507
1:	bl	__cpu_preinit_ppc970
O
Olof Johansson 已提交
508
2:
509

510
	/* Switch off MMU if not already off */
511 512
	bl	__mmu_off
	b	__after_prom_start
513
#endif /* CONFIG_PPC_BOOK3E */
514

515
__boot_from_prom:
516
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
517 518 519 520 521 522 523
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

524 525 526
	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
527
	 * boot binary, the stack pointer may be unaligned on PowerMac
528
	 */
529 530
	rldicr	r1,r1,0,59

531 532 533
#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
534
	bl	relocate
535 536
#endif

537 538 539 540 541 542 543 544
	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
545
	mr	r8,r26
546
	bl	prom_init
547 548 549 550
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
551 552
	trap

553
__after_prom_start:
554 555 556 557
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
558 559 560
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
561
	lwz	r7,__run_at_load-_stext(r26)
562 563 564
#if defined(CONFIG_PPC_BOOK3E)
	tophys(r26,r26)
#endif
565
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
566 567 568
	bne	1f
	add	r25,r25,r26
1:	mr	r3,r25
569
	bl	relocate
570 571 572 573
#if defined(CONFIG_PPC_BOOK3E)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
574
#endif
575 576

/*
577
 * We need to run with _stext at physical address PHYSICAL_START.
578 579 580 581 582
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
583
	li	r3,0			/* target addr */
584
#ifdef CONFIG_PPC_BOOK3E
585
	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
586
#endif
587
	mr.	r4,r26			/* In some cases the loader may  */
588 589 590
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r4,r4)
#endif
591
	beq	9f			/* have already put us at zero */
592 593 594
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

595
#ifdef CONFIG_RELOCATABLE
596 597
/*
 * Check if the kernel has to be running as relocatable kernel based on the
598
 * variable __run_at_load, if it is set the kernel is treated as relocatable
599 600
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
601 602 603
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
604 605
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1
606 607
	bne	3f

608 609 610 611 612
#ifdef CONFIG_PPC_BOOK3E
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11
#else
613 614
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
615
#endif
616 617 618 619 620 621
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

622
	bl	copy_and_flush		/* copy the first n bytes	 */
623 624
					/* this includes the code being	 */
					/* executed here.		 */
625
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
626 627
	addi	r12,r8,(4f - _stext)@l	/* that we just made */
	mtctr	r12
628 629
	bctr

A
Anton Blanchard 已提交
630
.balign 8
631 632
p_end:	.llong	_end - _stext

633 634 635
4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
636
5:	bl	copy_and_flush		/* copy the rest */
637

638
9:	b	start_here_multiplatform
639

640 641 642 643 644 645 646 647 648 649 650
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
651
4:	li	r0,8			/* Use the smallest common	*/
652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
673
	isync
674 675 676 677 678 679 680 681 682 683 684 685 686 687
	blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

688 689 690 691 692 693 694 695 696 697 698
	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
699 700 701
	
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
702
	bl	enable_64b_mode
703

704 705 706 707 708 709 710 711 712
	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

713
	/* get TOC pointer (real address) */
714
	bl	relative_toc
715
	tovirt(r2,r2)
716

717
	/* Copy some CPU settings from CPU 0 */
718
	bl	__restore_cpu_ppc970
719 720 721 722 723 724 725

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
726 727
	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
728
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
729
	add	r13,r13,r4		/* for this processor.		*/
730
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/
731

732 733 734 735 736
	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
737 738
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)
739

740 741 742 743
	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

744
	b	__secondary_start
745 746 747 748 749 750 751 752 753 754 755

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
756
 *   r1	       = stack pointer (real addr of temp stack)
757 758 759
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
760
 */
761 762 763
	.section ".text";
	.align 2 ;

764
	.globl	__secondary_start
765
__secondary_start:
766 767
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM
768

769
	/* Initialize the kernel stack */
770
	LOAD_REG_ADDR(r3, current_set)
771
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
772 773 774
	ldx	r14,r3,r28
	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r14,PACAKSAVE(r13)
775

M
Michael Ellerman 已提交
776
	/* Do early setup for that CPU (SLB and hash table pointer) */
777
	bl	early_setup_secondary
778

779 780 781 782 783 784
	/*
	 * setup the new stack pointer, but *don't* use this until
	 * translation is on.
	 */
	mr	r1, r14

785
	/* Clear backchain so we get nice backtraces */
786 787 788
	li	r7,0
	mtlr	r7

789 790 791
	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
792
	stb	r7,PACASOFTIRQEN(r13)
793 794
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)
795

796
	/* enable MMU and jump to start_secondary */
797
	LOAD_REG_ADDR(r3, start_secondary_prolog)
798
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
799

800 801
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
802
	RFI
803 804 805 806
	b	.	/* prevent speculative execution */

/* 
 * Running with relocation on at this point.  All we want to do is
807 808
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
809
 */
810
start_secondary_prolog:
811
	ld	r2,PACATOC(r13)
812 813
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
814
	bl	start_secondary
815
	b	.
816 817 818 819 820 821 822 823 824
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
825
	bl	start_secondary
826
	b	.
827 828 829 830 831
#endif

/*
 * This subroutine clobbers r11 and r12
 */
832
enable_64b_mode:
833
	mfmsr	r11			/* grab the current MSR */
834 835 836 837
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
838
	li	r12,(MSR_64BIT | MSR_ISF)@highest
839
	sldi	r12,r12,48
840 841 842
	or	r11,r11,r12
	mtmsrd	r11
	isync
843
#endif
844 845
	blr

846 847 848 849
/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
850 851 852 853 854 855 856
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We use tovirt() at the call
 * sites to handle this.
857 858 859 860
 */
_GLOBAL(relative_toc)
	mflr	r0
	bcl	20,31,$+4
861 862 863
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
864 865 866
	mtlr	r0
	blr

A
Anton Blanchard 已提交
867
.balign 8
868 869
p_toc:	.llong	__toc_start + 0x8000 - 0b

870 871 872
/*
 * This is where the main kernel code starts.
 */
873
start_here_multiplatform:
874
	/* set up the TOC */
875
	bl      relative_toc
876
	tovirt(r2,r2)
877 878 879 880 881 882

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
883 884
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
885 886
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
887
	srdi.	r11,r11,3		/* shift right by 3		*/
888 889 890 891 892 893 894 895
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

896 897
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
898
	LOAD_REG_ADDR(r11, opal)
899 900 901 902
	std	r28,0(r11);
	std	r29,8(r11);
#endif

903
#ifndef CONFIG_PPC_BOOK3E
904 905 906
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
907
#endif
908

909 910 911 912 913 914 915
#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

916
	/* The following gets the stack set up with the regs */
917 918 919 920 921
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

922
	LOAD_REG_ADDR(r3,init_thread_union)
923

924
	/* set up a stack pointer */
925 926 927 928
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

M
Michael Ellerman 已提交
929 930 931 932
	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */
933 934 935

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
936
	bl	early_setup		/* also sets r13 and SPRG_PACA */
937

938
	LOAD_REG_ADDR(r3, start_here_common)
939
	ld	r4,PACAKMSR(r13)
940 941
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
942
	RFI
943
	b	.	/* prevent speculative execution */
944

945
	/* This is where all platforms converge execution */
946 947

start_here_common:
948
	/* relocation is on at this point */
949
	std	r1,PACAKSAVE(r13)
950

951
	/* Load the TOC (virtual address) */
952 953
	ld	r2,PACATOC(r13)

954 955 956 957 958 959 960
	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)
961

962
	/* Generic kernel entry */
963
	bl	start_kernel
964

965 966
	/* Not reached */
	BUG_OPCODE
967 968 969 970 971 972

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"
973 974 975 976 977
/*
 * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
 * We will need to find a better way to fix this
 */
	.align	16
978

979 980 981
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE
982 983 984 985

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE