/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	: 	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj		: 	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/genapic.h>
#include <asm/setup.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
static int low_mappings;
#endif

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
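
/*
 * Transitions seen in this file: native_cpu_up() sets CPU_UP_PREPARE,
 * start_secondary() sets CPU_ONLINE, and (with hotplug) play_dead_common()
 * acks CPU_DEAD.
 */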

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;


/* Set if we find a B stepping CPU */
static int __cpuinitdata smp_b_stepping;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid()  do {} while (0)
#endif

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	cpuid = smp_processor_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates, by a factor of two,
	 * the time the boot CPU spends sending the up to 2 STARTUP IPIs.
	 * This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	pr_debug("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	notify_cpu_starting(cpuid);
	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int __cpuinitdata unsafe_smp;

/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	vmi_bringup();
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

#ifdef CONFIG_X86_32
	while (low_mappings)
		cpu_relax();
	__flush_tlb_all();
#endif

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 *
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	__setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* enable local interrupts */
	local_irq_enable();

	setup_secondary_clock();

	wmb();
	cpu_idle();
}
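
/*
 * Summary of the bring-up order above: cpu_init() first, then
 * smp_callin() to synchronize with the BSP, a TSC sync check, sibling
 * map setup, and only then is the CPU marked online (under
 * ipi_call_lock()/vector_lock) before dropping into cpu_idle().
 */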

static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
		 * more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		unsafe_smp = 1;
	}

valid_k7:
	;
}

static void __cpuinit smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (unsafe_smp && num_online_cpus() > 1) {
		printk(KERN_INFO "WARNING: This combination of AMD "
			"processors is not suitable for SMP.\n");
		add_taint(TAINT_UNSAFE_SMP);
	}
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}


void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, cpu_sibling_setup_mask) {
			struct cpuinfo_x86 *o = &cpu_data(i);

			if (c->phys_proc_id == o->phys_proc_id &&
			    c->cpu_core_id == o->cpu_core_id) {
				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, &c->llc_shared_map);
				cpumask_set_cpu(cpu, &o->llc_shared_map);
			}
		}
	} else {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	}

	cpumask_set_cpu(cpu, &c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpumask_set_cpu(i, &c->llc_shared_map);
			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpumask_set_cpu(i, cpu_core_mask(cpu));
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
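
/*
 * For example (hypothetical topology): on a two-socket box whose
 * packages each have two HT cores, a logical CPU ends up with its two
 * hardware threads in cpu_sibling_mask(), all four logical CPUs of its
 * package in cpu_core_mask(), and booted_cores counting the two cores
 * brought up in that package.
 */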

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return cpu_core_mask(cpu);
	else
		return &c->llc_shared_map;
}

cpumask_t cpu_coregroup_map(int cpu)
{
	return *cpu_coregroup_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1.\n");
}

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int __devinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

int __devinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
		send_status = uv_wakeup_secondary(phys_apicid, start_eip);
		atomic_set(&init_deasserted, 1);
		return send_status;
	}

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	pr_debug("Deasserting INIT.\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 (unsigned long)stack_start.sp);

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d.\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d.\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		pr_debug("Startup point 1.\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
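
/*
 * The sequence above is the classic INIT-SIPI-SIPI protocol: assert a
 * level-triggered INIT, wait 10ms, deassert it, then (on integrated
 * APICs) send up to two STARTUP IPIs whose vector field carries the
 * page number of the trampoline (start_eip >> 12), with short delays
 * so the target can accept each IPI.
 */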

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
#endif
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start.sp = (void *) c_idle.idle->thread.sp;

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up   */
	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
			  cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		store_NMI_vector(&nmi_high, &nmi_low);

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		pr_debug("Before Callout %d.\n", cpu);
		cpumask_set_cpu(cpu, cpu_callout_mask);
		pr_debug("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpumask_test_cpu(cpu, cpu_callin_mask))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			pr_debug("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			pr_debug("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
				inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		numa_remove_cpu(cpu); /* was set by numa_add_cpu */

		/* was set by do_boot_cpu() */
		cpumask_clear_cpu(cpu, cpu_callout_mask);

		/* was set by cpu_init() */
		cpumask_clear_cpu(cpu, cpu_initialized_mask);

		set_cpu_present(cpu, false);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	return boot_error;
}

int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
	flush_tlb_all();
	low_mappings = 1;

	err = do_boot_cpu(apicid, cpu);

	zap_low_mappings();
	low_mappings = 0;
#else
	err = do_boot_cpu(apicid, cpu);
#endif
	if (err) {
		pr_debug("do_boot_cpu failed %d\n", err);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
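
/*
 * Overall bring-up path: native_cpu_up() -> do_boot_cpu() ->
 * wakeup_secondary_cpu(); the AP then enters start_secondary() via the
 * trampoline and reports in through smp_callin().
 */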

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	/* use the read/write pointers to the present and possible maps */
	cpumask_copy(&cpu_present_map, cpumask_of(0));
	cpumask_copy(&cpu_possible_map, cpumask_of(0));
	smpboot_clear_io_apic_irqs();

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	map_cpu_to_logical_apicid();
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		printk(KERN_WARNING
		       "More than 8 CPUs detected - skipping them.\n"
		       "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation."
				"(tell your hw vendor)\n");
		smpboot_clear_io_apic();
		disable_ioapic_setup();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		smpboot_clear_io_apic();

		localise_nmi_watchdog();

		connect_bsp_APIC();
		setup_local_APIC();
		end_local_APIC_setup();
		return -1;
	}

	return 0;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	preempt_disable();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();
	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
	boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

#ifdef CONFIG_X86_64
	enable_IR_x2apic();
	setup_apic_routing();
#endif

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		goto out;
	}

	preempt_disable();
	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}
	preempt_enable();

	connect_bsp_APIC();

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

#ifdef CONFIG_X86_64
	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif
	end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	setup_boot_clock();

	if (is_uv_system())
		uv_system_init();
out:
	preempt_enable();
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
#endif
	switch_to_new_gdt();
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done.\n");

	impress_friends();
	smp_checks();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
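
/*
 * For example, booting a 2-CPU machine with "possible_cpus=4" sizes
 * cpu_possible_map for four CPUs, leaving two slots free for CPU
 * hotplug (see prefill_possible_map() below).
 */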


/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	if (setup_possible_cpus == -1)
		possible = num_processors + disabled_cpus;
	else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	if (possible > CONFIG_NR_CPUS) {
		printk(KERN_WARNING
			"%d Processors exceeds NR_CPUS limit of %d\n",
			possible, CONFIG_NR_CPUS);
		possible = CONFIG_NR_CPUS;
	}

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);

	nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, cpu_core_mask(cpu)) {
		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();
	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	cpu_disable_common();
	return 0;
}

void native_cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	irq_ctx_exit(raw_smp_processor_id());
	c1e_remove_cpu(raw_smp_processor_id());

	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
	wbinvd_halt();
}
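
/*
 * Offline path: native_cpu_disable() runs on the dying CPU, its idle
 * task then calls native_play_dead(), while another CPU polls in
 * native_cpu_die() for the CPU_DEAD acknowledgement.
 */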

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif