/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
};

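/*
 * Completed by the incoming CPU in secondary_start_kernel(); __cpu_up()
 * waits on it (with a timeout) to learn that the new CPU is up.
 */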
static DECLARE_COMPLETION(cpu_running);

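/*
 * Machine-specific SMP operations, copied in by smp_set_ops() during
 * early boot and used by the generic helpers below.
 */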
static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

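/*
 * Pack a page-table pointer into the shifted physical form that the
 * secondary startup code expects to find in secondary_data.
 */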
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_idmap(pgd);
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}


	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

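/* signalled by the dying CPU in cpu_die(), waited for in __cpu_die() */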
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	printk(KERN_INFO "SMP: Total of %d processors activated.\n",
	       num_online_cpus());

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

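/*
 * Back-end used to raise an IPI on a set of CPUs; installed once at
 * boot via set_smp_cross_call(), typically by the interrupt
 * controller driver.
 */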
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!smp_cross_call)
		smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (is_smp())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

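/*
 * Per-CPU completion used by the IPI_COMPLETION message: a caller
 * registers a completion for a CPU, and that CPU's IPI handler
 * completes it (see ipi_complete() and handle_IPI() below).
 */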
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

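/*
 * On cpufreq transitions, rescale the global and per-CPU loops_per_jiffy
 * values so udelay() stays roughly calibrated, unless the platform
 * declares CPUFREQ_CONST_LOOPS.
 */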
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif