/*
 * 
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

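/* Number of secondary CPUs still sitting in the common spinloop; polled
 * by smp_release_cpus() below. */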
int spinning_secondaries;
u64 ppc64_pft_size;

/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
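/*
 * Point each thread's TLB state at the tlb_core_data owned by the first
 * thread on the core (or at the boot CPU's thread after a kdump boot),
 * and warn about SMT configurations the TLB handlers can't cope with.
 */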
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* Enable AIL if supported, and we are in hypervisor mode */
		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
			unsigned long lpcr = mfspr(SPRN_LPCR);
			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
		}
	}
}

static void cpu_ready_for_interrupts(void)
{
	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (even after the above DBG())
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
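/*
 * Early secondary entry point, reached (still in real mode) from the
 * secondary startup path in head_64.S.
 */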
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
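/*
 * On Book3E, secondaries are normally held in the bootloader's ePAPR
 * spin table rather than in the kernel spinloop, except when we arrived
 * here via kexec.
 */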
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non-SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	DBG(" <- initialize_cache_info()\n");
}

/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
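/*
 * Allocate the critical, debug and machine check exception stacks used
 * by Book3E, one of each per CPU, and hang them off the paca.
 */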
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

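/*
 * First-chunk allocator callbacks for pcpu_embed_first_chunk(): hand out
 * node-local bootmem for the initial percpu area and free it back again.
 */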
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}
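/* Report CPU-to-CPU distance to the percpu allocator in NUMA terms */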
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

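/*
 * Set up the first-chunk percpu area and record each CPU's offset both
 * in __per_cpu_offset[] and in its paca, so percpu accesses work from
 * either lookup path.
 */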
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
	 * to group units.  For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
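/*
 * Memory hotplug block size: use the platform's value when it provides
 * one, otherwise fall back to the generic minimum.
 */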
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
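/*
 * The watchdog samples a cycles-based perf event; convert the threshold
 * in seconds into the equivalent number of processor cycles.
 */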
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}

/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif