 /*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	: 	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *      Andi Kleen              :       Converted to new state machine.
 *	Ashok Raj		: 	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
static int *physical_to_logical_pkg __read_mostly;
static unsigned long *physical_package_map __read_mostly;
static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static bool logical_packages_frozen __read_mostly;

/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;

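/*
 * Set up the warm-reset vector used by the INIT/SIPI dance: write 0xa
 * ("jump via the vector at 40:67") into the CMOS shutdown status byte
 * (offset 0xf) and store the real-mode segment:offset of the trampoline
 * at 40:67, so a CPU coming out of a warm reset starts executing at
 * start_eip.
 */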
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	local_flush_tlb();
	pr_debug("1.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	pr_debug("2.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
	pr_debug("3.\n");
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Install writable page 0 entry to set BIOS data area.
	 */
	local_flush_tlb();

	/*
	 * Paranoid:  Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If woken up by an INIT in an 82489DX configuration,
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU: first the APIC. (This is probably redundant on most
	 * boards.)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

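/* State used to wake up a soft-offlined CPU0 by NMI (see wakeup_cpu0_nmi()) */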
static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Lock vector_lock and initialize the vectors on this cpu
	 * before setting the cpu online. We must set it online with
	 * vector_lock held to prevent a concurrent setup/teardown
	 * from seeing a half valid vector space.
	 */
	lock_vector_lock();
	setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

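/*
 * Allocate the next free logical package id the first time a physical
 * package (derived from the APIC id) is seen, and record the mapping in
 * cpu_data() for this cpu.
 */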
int topology_update_package_map(unsigned int apicid, unsigned int cpu)
{
	unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;

	/* Called from early boot ? */
	if (!physical_package_map)
		return 0;

	if (pkg >= max_physical_pkg_id)
		return -EINVAL;

	/* Set the logical package id */
	if (test_and_set_bit(pkg, physical_package_map))
		goto found;

	if (logical_packages_frozen) {
		physical_to_logical_pkg[pkg] = -1;
		pr_warn("APIC(%x) Package %u exceeds logical package max\n",
			apicid, pkg);
		return -ENOSPC;
	}

	new = logical_packages++;
	pr_info("APIC(%x) Converting physical %u to logical package %u\n",
		apicid, pkg, new);
	physical_to_logical_pkg[pkg] = new;

found:
	cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
	return 0;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	if (phys_pkg >= max_physical_pkg_id)
		return -1;
	return physical_to_logical_pkg[phys_pkg];
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

static void __init smp_init_package_map(void)
{
	unsigned int ncpus, cpu;
	size_t size;

	/*
	 * Today neither Intel nor AMD supports heterogeneous systems. That
	 * might change in the future....
	 *
	 * While ideally we'd want '* smp_num_siblings' in the below @ncpus
	 * computation, this won't actually work since some Intel BIOSes
	 * report inconsistent HT data when they disable HT.
	 *
	 * In particular, they reduce the APIC-IDs to only include the cores,
	 * but leave the CPUID topology to say there are (2) siblings.
	 * This means we don't know how many threads there will be until
	 * after the APIC enumeration.
	 *
	 * By not including this we'll sometimes over-estimate the number of
	 * logical packages by the amount of !present siblings, but this is
	 * still better than MAX_LOCAL_APIC.
	 *
	 * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
	 * on the command line leading to a similar issue as the HT disable
	 * problem because the hyperthreads are usually enumerated after the
	 * primary cores.
	 */
	ncpus = boot_cpu_data.x86_max_cores;
	if (!ncpus) {
		pr_warn("x86_max_cores == zero !?!?");
		ncpus = 1;
	}

	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	logical_packages = 0;

	/*
	 * Possibly larger than what we need as the number of apic ids per
	 * package can be smaller than the actual used apic ids.
	 */
	max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
	size = max_physical_pkg_id * sizeof(unsigned int);
	physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
	memset(physical_to_logical_pkg, 0xff, size);
	size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
	physical_package_map = kzalloc(size, GFP_KERNEL);

	for_each_present_cpu(cpu) {
		unsigned int apicid = apic->cpu_present_to_apicid(cpu);

		if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
			continue;
		if (!topology_update_package_map(apicid, cpu))
			continue;
		pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
		per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
		set_cpu_possible(cpu, false);
		set_cpu_present(cpu, false);
	}

	if (logical_packages > __max_logical_packages) {
		pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
			logical_packages, __max_logical_packages);
		logical_packages_frozen = true;
		__max_logical_packages  = logical_packages;
	}

	pr_info("Max logical packages: %u\n", __max_logical_packages);
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	smp_init_package_map();
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

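/*
 * Warn once and reject the sibling link when two CPUs that are supposed
 * to share a topology level do not sit on the same NUMA node.
 */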
static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

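/* Link two CPUs symmetrically in the cpumask returned by mfunc() */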
#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

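/*
 * Two logical CPUs are SMT siblings if they are in the same package and
 * core; on X86_FEATURE_TOPOEXT parts the shared last level cache id is
 * checked as well.
 */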
static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
		    c->cpu_core_id == o->cpu_core_id)
			return topology_sane(c, o, "smt");

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

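/* Two CPUs that report the same valid last level cache id share an LLC */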
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
		return topology_sane(c, o, "llc");

	return false;
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node.  If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours and Intel Cluster-on-Die have this.
 */
static bool x86_has_numa_in_package;

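/*
 * Build the SMT, last level cache and core sibling masks for @cpu by
 * matching it against every CPU that has already been through this
 * function, and keep booted_cores and the maximum SMT thread count up
 * to date.
 */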
void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);

	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 *  Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_die(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}

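/*
 * Debug helper: read the ID, version and SPIV registers of a remote APIC
 * via remote-read ICR messages and print what comes back.
 */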
void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to override this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

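/*
 * Kick a secondary CPU into life with the classic INIT-INIT-STARTUP
 * sequence: assert and deassert INIT, then (on integrated APICs only)
 * send up to two STARTUP IPIs pointing at start_eip, checking the APIC
 * ESR for delivery errors along the way.
 */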
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

void smp_announce(void)
{
	int num_nodes = num_online_nodes();

	printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
	       num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
}

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

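/*
 * NMI handler that claims the NMI only on a soft-offlined CPU0 which was
 * asked to restart; any other CPU passes the NMI on.
 */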
static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code, which is not a desired behavior for waking up the BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
	       int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by NMI.
	 *
	 * Register an NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

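/*
 * Bringup preparations common to all CPU-up paths: enable SMP
 * alternatives and set up the idle task's stack and per-cpu state for
 * the new cpu.
 */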
void common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
	per_cpu(cpu_current_top_of_stack, cpu) =
		(unsigned long)task_stack_page(idle) + THREAD_SIZE;
#else
	initial_gs = per_cpu_offset(cpu);
#endif
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int cpu0_nmi_registered = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long) (((struct pt_regs *)
			  (THREAD_SIZE +  task_stack_page(idle))) - 1);

	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack  = idle->thread.sp;

	/*
	 * Enable the espfix hack for this CPU
	 */
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_ap(cpu);
#endif

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     &cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return boot_error;
}

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	__cpu_disable_lazy_restore(cpu);

	common_cpu_up(cpu, tidle);

	err = do_boot_cpu(apicid, cpu, tidle);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
}

enum {
	SMP_OK,
	SMP_NO_CONFIG,
	SMP_NO_APIC,
	SMP_FORCE_UP,
};

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
1202
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		pr_notice("SMP motherboard not detected\n");
		return SMP_NO_CONFIG;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version) &&
	    !boot_cpu_has(X86_FEATURE_APIC)) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
				boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
		}
		return SMP_NO_APIC;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		pr_info("SMP mode deactivated\n");
		return SMP_FORCE_UP;
	}

	return SMP_OK;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology, this matches default_topology() in that
	 * it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() is run.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);

	switch (smp_sanity_check(max_cpus)) {
	case SMP_NO_CONFIG:
		disable_smp();
		if (APIC_init_uniprocessor())
			pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
		return;
	case SMP_NO_APIC:
		disable_smp();
		return;
	case SMP_FORCE_UP:
		disable_smp();
		apic_bsp_setup(false);
		return;
	case SMP_OK:
		break;
	}

	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}

	default_setup_apic_routing();
	cpu0_logical_apicid = apic_bsp_setup(false);

	pr_info("CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	setup_ioapic_dest();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);


/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and don't expect to
 * do this dynamically on CPU arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No boot processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		int apicid = boot_cpu_physical_apicid;
		int cpu = hard_smp_processor_id();

		pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

		/* Make sure boot cpu is enumerated */
		if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
		    apic->apic_id_valid(apicid))
			generic_processor_info(apicid, boot_cpu_apic_version);

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

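/*
 * Undo set_cpu_sibling_map() when @cpu goes offline: drop it from all
 * sibling masks, fix up booted_cores and recompute the SMT state.
 */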
static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int ret;

	ret = check_irq_vectors_for_cpu_disable();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	amd_e400_remove_cpu(raw_smp_processor_id());

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

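/*
 * Fallback play_dead(): flush caches (WBINVD exists from family 4 on)
 * and halt, restarting only when an NMI wants to wake up CPU0.
 */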
void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif