/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

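/*
 * Install the platform-specific SMP operations.  Called once during
 * early boot, before any secondary CPU is brought up.
 */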
void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

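/*
 * Boot a secondary CPU: called by the generic hotplug code on the
 * requesting CPU, with the secondary's idle task already allocated.
 */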
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
#endif
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

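/*
 * The interrupt controller code registers the function used to raise
 * an IPI on a set of CPUs via set_smp_cross_call().
 */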
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!smp_cross_call)
		smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

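/*
 * Report the per-CPU count of each IPI type (shown in /proc/interrupts).
 */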
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

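/*
 * Register a dummy clock event device for this CPU.  It programs no
 * hardware; ticks are delivered through the clock events broadcast
 * mechanism (see the IPI_TIMER handling below).
 */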
static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

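/*
 * Set up the clock event device for this CPU: use the registered local
 * timer if its setup succeeds, otherwise fall back to the dummy
 * broadcast device.
 */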
static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
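/*
 * Ask the platform to cut power/clocks to each CPU in the mask; used by
 * smp_send_stop() once the other CPUs have (hopefully) stopped.
 */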
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

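/*
 * Rescale the global and per-CPU loops_per_jiffy values across CPU
 * frequency transitions so that udelay() stays roughly calibrated.
 */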
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif