/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

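/* Completed by a freshly booted secondary CPU in secondary_start_kernel(); __cpu_up() waits on it */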
static DECLARE_COMPLETION(cpu_running);

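/* Platform-provided SMP operations, copied in via smp_set_ops() at init time */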
static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	printk("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

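/* Function used to raise an IPI on a set of CPUs; installed via set_smp_cross_call() */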
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

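/* Ask the platform to finally kill each CPU that was told to stop by smp_send_stop() */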
#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

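/*
 * Rescale the global and per-cpu loops_per_jiffy values when the CPU
 * frequency changes, so delay loops stay roughly calibrated.  Skipped
 * when the platform reports CPUFREQ_CONST_LOOPS.
 */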
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif