/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

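/*
 * cpuidle_lock serializes device registration and enable/disable;
 * cpuidle_detected_devices lists every registered device.
 */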
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
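/* set via the "cpuidle.off" boot parameter (module_param at file end) */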
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

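/*
 * cpuidle_kick_cpus - force all CPUs out of their current idle state so
 * that none keeps running an idle handler that is being uninstalled.
 */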
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_state *target_state;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
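	/*
	 * No state is entered when a reschedule is already pending; enable
	 * interrupts ourselves, as a state's enter() would have done.
	 */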
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	target_state = &drv->states[next_state];

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = target_state->enter(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/*
		 * This can be moved to within the driver enter routine,
		 * but that results in multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/*
		 * Make sure all changes are visible before we switch to the
		 * new idle handler.
		 */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
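 *
 * Must be paired with cpuidle_resume_and_unlock(): cpuidle_lock is held
 * and the idle handler stays uninstalled until that call.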
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
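/*
 * poll_idle - the shallowest "idle" state: spin with interrupts enabled
 * until a reschedule is pending, then report the time spent polling as
 * the state's residency.
 */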
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t	t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

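/* install poll_idle above as state 0 of the driver's state table */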
static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(cpuidle_get_driver());

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!sys_dev)
		return -EINVAL;
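	/* pin the backing driver module while this device references it */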
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}
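
/*
 * Minimal registration sketch (the "my_" names are illustrative, not
 * part of this file): a driver fills in one cpuidle_device per CPU and
 * hands each to cpuidle_register_device():
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);
 *
 *	int my_idle_setup(unsigned int cpu)
 *	{
 *		struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);
 *
 *		dev->cpu = cpu;
 *		dev->state_count = my_driver.state_count;
 *		return cpuidle_register_device(dev);
 *	}
 */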

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

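/*
 * Hook into the PM QoS cpu_dma_latency chain so that a new latency
 * requirement immediately wakes every CPU, letting the governor pick a
 * C-state that honours it.
 */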
static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);