/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

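/*
 * thread_info of the next secondary CPU to come up; published here so
 * the secondary's early startup code can locate its initial stack
 * before it has a working 'current'.
 */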
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

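/*
 * Set by crash_send_ipi(); when non-NULL, debug_ipi_action() diverts
 * the debugger-break IPI into this crash/kdump callback.
 */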
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might have been soft-unplugged;
	 * let's try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

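/*
 * Per-message interrupt handlers; on controllers with at least four
 * hardware IPIs, smp_request_message_ipi() below wires one of these to
 * each PPC_MSG_* message.
 */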
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

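/*
 * Each message type owns one byte of the 32-bit 'messages' word, so
 * raising a message is a single byte store that cannot clobber a
 * concurrent sender of a different message; smp_ipi_demux() harvests
 * the whole word atomically with xchg().
 */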
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);

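	/*
	 * On big-endian, message N lives in byte N of the word, so a
	 * value of 1 in that byte shows up as bit (24 - 8 * N) of the
	 * 32-bit quantity.
	 */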
#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

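/* Record this CPU's PVR (and, on FSL Book3E, its next free TLBCAM index). */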
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops) {
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	} else {
		max_cpus = 1;
	}
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

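	/* Poll for up to ten seconds (100 * 100ms) for the CPU to die. */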
	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

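/* Mark @cpu fully dead; the wait loop in generic_cpu_die() looks for this. */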
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() should set the cpu_state back to CPU_UP_PREPARE (e.g. via
 * generic_set_cpu_up()); otherwise it stays CPU_DEAD from the previous
 * generic_set_cpu_dead() call, and the wait loop in generic_cpu_die()
 * would complete immediately on the next unplug.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);

static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

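/*
 * Point the secondary at its idle task: set up the ppc64 paca's current
 * and kernel stack pointers and publish the idle task's thread_info via
 * secondary_ti so the new CPU can find its stack.
 */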
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure the callin-map entry is 0 (it can be left over
	 * from a previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
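	/* Threads that share an L2 cache are put in the same core map. */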
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin ourselves down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

}

int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

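	/* Drop this CPU from the core map of any CPU sharing its L2. */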
	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

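/* Called on the dying CPU itself, from the idle loop, once it is offline. */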
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif