/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

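/*
 * IPI message types; the platform's smp_cross_call() raises these as
 * hardware IPIs and handle_IPI() dispatches them on the receiving CPU.
 */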
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

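/*
 * Completed by an incoming CPU from secondary_start_kernel();
 * __cpu_up() waits on it to learn that the new core is alive.
 */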
static DECLARE_COMPLETION(cpu_running);

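/* Platform SMP operations, installed early via smp_set_ops(). */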
static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

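/*
 * Platform-provided hook used to raise an IPI on the CPUs in a mask;
 * it must be registered via set_smp_cross_call() before IPIs are sent.
 */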
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

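/* Total number of IPIs taken by @cpu, for interrupt accounting. */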
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

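/* IPI_TIMER handler: run this CPU's clockevent handler. */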
static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

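/*
 * Set up the per-CPU clock event device: use the registered local
 * timer if one is available, otherwise fall back to the dummy
 * broadcast-driven clockevent.
 */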
static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

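/*
 * Stop all other CPUs: raise IPI_CPU_STOP on every other online CPU,
 * give them up to one second to go offline, then ask the platform to
 * kill any stragglers.
 */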
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

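/*
 * Rescale loops_per_jiffy (global and per-CPU) on CPU frequency
 * changes so udelay() stays roughly accurate; reference values are
 * captured the first time the notifier sees each CPU.
 */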
static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif