/*
 * The idle loop for all SuperH platforms.
 *
 *  Copyright (C) 2002 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>

23
void (*pm_idle)(void) = NULL;
24 25

static int hlt_counter;
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40

/*
 * "nohlt" boot parameter: disable the sleep instruction in the idle
 * loop by raising hlt_counter (hlt_works() then returns false and
 * poll_idle() is used instead).
 */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

/*
 * "hlt" boot parameter: re-enable the sleep instruction by clearing
 * hlt_counter (the inverse of "nohlt" above).
 */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57
static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * Busy-poll idle: spin on need_resched() with interrupts enabled.
 * On SMP this reacts slightly faster than waiting for the cross-CPU
 * reschedule IPI, at a significant cost in power. Use with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();

	for (;;) {
		if (need_resched())
			break;
		cpu_relax();
	}
}

58
void default_idle(void)
59
{
60
	if (hlt_works()) {
61 62 63
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

64
		set_bl_bit();
65 66
		if (!need_resched()) {
			local_irq_enable();
67
			cpu_sleep();
68 69
		} else
			local_irq_enable();
70 71

		set_thread_flag(TIF_POLLING_NRFLAG);
72
		clear_bl_bit();
73
	} else
74
		poll_idle();
75 76
}

77 78 79 80 81
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
82 83
void cpu_idle(void)
{
84 85
	unsigned int cpu = smp_processor_id();

86 87 88 89
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
90
		tick_nohz_stop_sched_tick(1);
91

92
		while (!need_resched() && cpu_online(cpu)) {
93 94 95
			check_pgt_cache();
			rmb();

96 97 98 99 100 101 102 103 104 105 106
			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}
107 108 109 110 111 112 113

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
114

115
void __init select_idle_routine(void)
116 117 118 119 120 121 122 123 124 125 126 127 128
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

/* Empty IPI payload; used only to kick CPUs out of pm_idle (see below). */
static void do_nothing(void *unused)
{
}

133 134 135
void stop_this_cpu(void *unused)
{
	local_irq_disable();
136
	set_cpu_online(smp_processor_id(), false);
137 138 139 140 141

	for (;;)
		cpu_sleep();
}

142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);