smp.c 18.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
21
#include <linux/export.h>
L
Linus Torvalds 已提交
22 23 24 25 26 27 28 29
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
30
#include <linux/device.h>
L
Linus Torvalds 已提交
31 32
#include <linux/cpu.h>
#include <linux/notifier.h>
33
#include <linux/topology.h>
L
Linus Torvalds 已提交
34 35

#include <asm/ptrace.h>
A
Arun Sharma 已提交
36
#include <linux/atomic.h>
L
Linus Torvalds 已提交
37
#include <asm/irq.h>
38
#include <asm/hw_irq.h>
L
Linus Torvalds 已提交
39 40 41 42 43 44
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
45
#include <asm/cputhreads.h>
L
Linus Torvalds 已提交
46
#include <asm/cputable.h>
47
#include <asm/mpic.h>
48
#include <asm/vdso_datapage.h>
P
Paul Mackerras 已提交
49 50 51
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
52
#include <asm/vdso.h>
53
#include <asm/debug.h>
P
Paul Mackerras 已提交
54

L
Linus Torvalds 已提交
55
#ifdef DEBUG
56
#include <asm/udbg.h>
L
Linus Torvalds 已提交
57 58 59 60 61
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

62
#ifdef CONFIG_HOTPLUG_CPU
63 64
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
65 66
#endif

67 68
struct thread_info *secondary_ti;

69 70
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
L
Linus Torvalds 已提交
71

72
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
73
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
L
Linus Torvalds 已提交
74

P
Paul Mackerras 已提交
75
/* SMP operations for this machine */
L
Linus Torvalds 已提交
76 77
struct smp_ops_t *smp_ops;

78 79
/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];
L
Linus Torvalds 已提交
80 81 82

int smt_enabled_at_boot = 1;

83 84
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* The limits only apply while booting, and only to SMT threads. */
	if (system_state != SYSTEM_BOOTING || !cpu_has_feature(CPU_FTR_SMT))
		return 1;

	/* User asked for no secondary threads at all */
	if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
		return 0;

	/* User capped the number of threads per core */
	if (smt_enabled_at_boot &&
	    cpu_thread_in_core(nr) >= smt_enabled_at_boot)
		return 0;

	return 1;
}


P
Paul Mackerras 已提交
107
#ifdef CONFIG_PPC64
/* Release (or re-wake) secondary @nr so it can enter secondary_start. */
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
L
Linus Torvalds 已提交
136

137 138 139 140 141 142 143 144
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
145
	scheduler_ipi();
146 147 148
	return IRQ_HANDLED;
}

149
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
150
{
151
	tick_broadcast_ipi_handler();
152 153 154
	return IRQ_HANDLED;
}

155
static irqreturn_t debug_ipi_action(int irq, void *data)
156
{
157 158 159 160 161 162 163 164 165
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

166 167 168 169 170 171
	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
172
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
173 174 175 176 177 178
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
179
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
196
	err = request_irq(virq, smp_ipi_action[msg],
197
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
198
			  smp_ipi_name[msg], NULL);
199 200 201 202 203 204
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}

205
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

/* Stash the platform cookie passed to cause_ipi() for @cpu. */
void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

/* Set message bit @msg for @cpu and trigger the (single) muxed IPI. */
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	/* Each message is a byte within the messages word */
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

236 237 238 239 240 241
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

242 243 244
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
245
	unsigned int all;
246 247

	mb();	/* order any irq clear */
248 249

	do {
250
		all = xchg(&info->messages, 0);
251
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
252
			generic_smp_call_function_interrupt();
253
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
254
			scheduler_ipi();
255 256
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
257
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
258
			debug_ipi_action(0, NULL);
259 260
	} while (info->messages);

261 262
	return IRQ_HANDLED;
}
263
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
264

265 266 267 268 269 270 271 272 273 274
/*
 * Deliver IPI message @msg to @cpu: use the platform's direct
 * message_pass hook when one is registered, otherwise fall back to the
 * muxed-IPI path (only compiled in under CONFIG_PPC_SMP_MUXED_IPI).
 */
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

L
Linus Torvalds 已提交
275 276
void smp_send_reschedule(int cpu)
{
277
	if (likely(smp_ops))
278
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
L
Linus Torvalds 已提交
279
}
280
EXPORT_SYMBOL_GPL(smp_send_reschedule);
L
Linus Torvalds 已提交
281

282 283
void arch_send_call_function_single_ipi(int cpu)
{
284
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
285 286
}

287
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
288 289 290
{
	unsigned int cpu;

291
	for_each_cpu(cpu, mask)
292
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
293 294
}

295 296 297 298 299 300 301 302 303 304
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Send a tick-broadcast IPI to every CPU in @mask. */
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

305 306
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
/* Send a debugger-break IPI to every online CPU except the caller. */
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

320 321 322 323
#ifdef CONFIG_KEXEC
/*
 * Install @crash_ipi_callback as the debugger-break IPI handler and,
 * when non-NULL, broadcast the break so all other CPUs enter it.
 */
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		/* Make sure the callback is visible before the IPI lands */
		mb();
		smp_send_debugger_break();
	}
}
#endif

L
Linus Torvalds 已提交
331 332
static void stop_this_cpu(void *dummy)
{
333 334 335
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

L
Linus Torvalds 已提交
336 337 338 339 340
	local_irq_disable();
	while (1)
		;
}

341 342
void smp_send_stop(void)
{
343
	smp_call_function(stop_this_cpu, NULL, 0);
L
Linus Torvalds 已提交
344 345 346 347
}

struct thread_info *current_set[NR_CPUS];

348
static void smp_store_cpu_info(int id)
L
Linus Torvalds 已提交
349
{
350
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
351 352 353 354
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
L
Linus Torvalds 已提交
355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We havent
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	/* Allocate the per-cpu sibling/core mask storage, NUMA-local */
	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	/* The boot CPU is trivially its own sibling and core member */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

387
void smp_prepare_boot_cpu(void)
L
Linus Torvalds 已提交
388 389
{
	BUG_ON(smp_processor_id() != boot_cpuid);
P
Paul Mackerras 已提交
390
#ifdef CONFIG_PPC64
L
Linus Torvalds 已提交
391
	paca[boot_cpuid].__current = current;
P
Paul Mackerras 已提交
392
#endif
A
Al Viro 已提交
393
	current_set[boot_cpuid] = task_thread_info(current);
L
Linus Torvalds 已提交
394 395 396 397 398 399 400 401 402 403 404
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Default cpu_disable: mark the calling CPU offline and migrate its
 * interrupts away.  The boot CPU cannot be disabled.
 */
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
418
		smp_rmb();
L
Linus Torvalds 已提交
419 420 421 422 423 424 425 426 427 428 429 430
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
431
	idle_task_exit();
L
Linus Torvalds 已提交
432 433 434
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
435
	smp_wmb();
L
Linus Torvalds 已提交
436 437 438
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}
439 440 441 442 443

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
444

445 446 447 448 449 450 451 452 453 454
/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

455 456 457 458
int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497

static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);

/* Non-zero while any caller holds an inhibit on secondary threads */
static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

500
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
501
{
502
	struct thread_info *ti = task_thread_info(idle);
503 504

#ifdef CONFIG_PPC64
505
	paca[cpu].__current = idle;
506 507 508
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
509
	secondary_ti = current_set[cpu] = ti;
510 511
}

512
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
L
Linus Torvalds 已提交
513
{
514
	int rc, c;
L
Linus Torvalds 已提交
515

516 517 518 519 520 521 522
	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

523 524
	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
L
Linus Torvalds 已提交
525 526
		return -EINVAL;

527
	cpu_idle_thread_init(cpu, tidle);
528

L
Linus Torvalds 已提交
529 530 531 532 533 534 535 536 537
	/* Make sure callin-map entry is 0 (can be leftover a CPU
	 * hotplug
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
538
	smp_mb();
L
Linus Torvalds 已提交
539 540 541

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
542 543 544 545 546
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}
L
Linus Torvalds 已提交
547 548 549 550 551 552 553

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
554
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
L
Linus Torvalds 已提交
555 556 557 558 559 560 561
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
562 563
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
L
Linus Torvalds 已提交
564 565 566
#endif

	if (!cpu_callin_map[cpu]) {
567
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
L
Linus Torvalds 已提交
568 569 570
		return -ENOENT;
	}

571
	DBG("Processor %u found.\n", cpu);
L
Linus Torvalds 已提交
572 573 574 575 576 577 578 579 580 581 582

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

583 584 585 586 587 588
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
589
	const __be32 *reg;
590 591 592 593 594 595 596 597 598 599
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

600
	id = be32_to_cpup(reg);
601 602 603 604 605
out:
	of_node_put(np);
	return id;
}

606 607 608 609 610 611 612 613 614 615 616 617 618
/* Helper routines for cpu to core mapping */

/* Map a logical CPU number to its core index (drops the thread bits). */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

/* First (thread-0) logical CPU number of the given core. */
int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645
/*
 * Add (@add true) or remove (@add false) @cpu in the cpu_core_mask of
 * every CPU whose "ibm,chip-id" device-tree property equals @chipid,
 * and vice versa.  Scans online CPUs when adding, present CPUs when
 * removing.
 */
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

646
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 *
 * Returns the L2-cache device node for @cpu (caller must of_node_put
 * it), or NULL if @cpu is not present or has no cache node.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
L
Linus Torvalds 已提交
667

668 669
/*
 * Update cpu_core_mask links for @cpu against its siblings: prefer the
 * "ibm,chip-id" device-tree property when present, otherwise fall back
 * to grouping CPUs that share an L2-cache node.
 */
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

L
Linus Torvalds 已提交
709
/* Activate a secondary processor. */
710
void start_secondary(void *unused)
L
Linus Torvalds 已提交
711 712
{
	unsigned int cpu = smp_processor_id();
713
	int i, base;
L
Linus Torvalds 已提交
714 715 716 717 718

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
P
Paul Mackerras 已提交
719
	set_dec(tb_ticks_per_jiffy);
A
Andrew Morton 已提交
720
	preempt_disable();
L
Linus Torvalds 已提交
721 722
	cpu_callin_map[cpu] = 1;

723 724
	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
L
Linus Torvalds 已提交
725 726 727
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

728 729
	secondary_cpu_time_init();

730 731 732
#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
733 734

	vdso_getcpu_init();
735
#endif
736
	/* Update sibling maps */
737
	base = cpu_first_thread_sibling(cpu);
738
	for (i = 0; i < threads_per_core; i++) {
739
		if (cpu_is_offline(base + i) && (cpu != base + i))
740
			continue;
741 742
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
743 744 745 746 747

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
748 749
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
750
	}
751
	traverse_core_siblings(cpu, true);
L
Linus Torvalds 已提交
752

753 754 755 756
	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

L
Linus Torvalds 已提交
757 758
	local_irq_enable();

T
Thomas Gleixner 已提交
759
	cpu_startup_entry(CPUHP_ONLINE);
760 761

	BUG();
L
Linus Torvalds 已提交
762 763 764 765 766 767 768
}

/* Profiling-timer multiplier changes are not supported here; accept and
 * ignore the request. */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

769 770 771 772
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
/*
 * Note: return type is plain int (not const int) so the function
 * matches the sched_domain_topology_level flags callback type.
 */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

L
Linus Torvalds 已提交
791 792
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin us down to CPU 0 for a short while
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	/* Restore the original affinity */
	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

L
Linus Torvalds 已提交
819 820 821
#ifdef CONFIG_HOTPLUG_CPU
/* Take the calling CPU out of service and drop its sibling/core links. */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

/* Let the platform finish taking @cpu down, if it provides a hook. */
void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
851

852 853 854 855
/*
 * Run on the dying CPU itself: hand off to the platform cpu_die hook;
 * if it returns, resume in start_secondary.
 */
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif