/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

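/* Thread_info of the next secondary to be started; set up by
 * cpu_idle_thread_init() before the CPU is kicked and consumed by the
 * secondary startup code.
 */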
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

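/* Each secondary marks itself here from start_secondary(); __cpu_up()
 * polls this to tell whether a kicked CPU actually came up.
 */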
/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

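/*
 * Post a message for @cpu and fire the platform IPI.  Each message type
 * owns one byte of the 'messages' word, so senders of different message
 * types cannot clobber one another; the receiver drains the whole word
 * atomically in smp_ipi_demux().
 */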
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

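/*
 * Map a message number to the bit that the byte store in
 * smp_muxed_ipi_message_pass() sets within the 'messages' word,
 * accounting for endianness (byte 0 is the most significant byte on
 * big-endian).
 */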
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

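/*
 * Demultiplex a muxed IPI: atomically swap out the pending-message word
 * and run the handler for each message type found, looping in case new
 * messages were posted while the previous batch was being handled.
 */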
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

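/* Send a single IPI message, preferring the platform's message_pass()
 * hook and falling back to the generic muxed-IPI mechanism.
 */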
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
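/*
 * Register the crash callback, then reuse the debugger-break IPI to
 * pull every other online CPU into it (see debug_ipi_action()).
 */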
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() should set cpu_state to CPU_UP_PREPARE; otherwise it stays
 * CPU_DEAD after generic_set_cpu_dead() is called, and generic_cpu_die()
 * returns immediately instead of waiting for the CPU to actually die.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);

static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

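/* Wire a secondary CPU to its idle task: set up the kernel stack (and,
 * on ppc64, the paca) before the CPU begins executing.
 */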
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

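/*
 * Bring one secondary online: install its idle thread, kick it through
 * smp_ops, then wait for it to check in via cpu_callin_map and finally
 * appear in the online mask.
 */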
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (it can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (i.e. is actually up).
	 * Use this delay value, found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

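/* Add or remove @cpu in the core mask of every CPU whose device-tree
 * node carries the same ibm,chip-id value.
 */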
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

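/*
 * Update cpu_core_mask when @cpu comes online (add) or goes offline
 * (!add): group by the ibm,chip-id property when the device tree
 * provides one, otherwise fall back to CPUs sharing an L2 cache.
 */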
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
	
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

}

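/* On cores with asymmetric SMT (CPU_FTR_ASYM_SMT, e.g. POWER7), ask the
 * scheduler to pack work onto the lower-numbered threads of each core.
 */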
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif