/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no idle state supports CPU
 * off-lining; on success the CPU is taken offline and never returns.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();

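	/*
	 * Take the timestamp before re-enabling interrupts, so that the
	 * time spent servicing the wakeup interrupt is not counted in
	 * the measured residency.
	 */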
	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This could be moved into the driver's enter routine,
		 * but that would result in multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;
	bool broadcast;

	if (off || !initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

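	/*
	 * States flagged CPUIDLE_FLAG_TIMER_STOP stop the CPU-local timer;
	 * hand timekeeping over to the broadcast clockevent device before
	 * entering such a state and take it back on the way out below.
	 */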
	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
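
/*
 * Illustrative sketch (not part of this file): architectures typically
 * call cpuidle_idle_call() from their idle entry point and fall back to
 * a default idle routine when it fails, e.g.
 *
 *	void arch_cpu_idle(void)
 *	{
 *		if (cpuidle_idle_call())
 *			default_idle();
 *	}
 */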

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the new idle handler */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
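		/*
		 * kick_all_cpus_sync() IPIs every online CPU and waits
		 * for completion, ensuring each CPU has left its current
		 * idle state before the handler is torn down.
		 */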
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);
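
/*
 * Illustrative usage sketch (not part of this file): external callers are
 * expected to bracket enable/disable with the pause/lock helpers documented
 * above, e.g.
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the idle states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */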

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and its devices. This function
 * can be used only if the driver has previously been registered through
 * the cpuidle_register() function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern used by the arch-specific drivers. The per-CPU
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, coupled idle states may be
		 * enabled even if this particular cpuidle driver does not
		 * use them. Note that coupled_cpus is copied by value.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
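
/*
 * Illustrative sketch (not part of this file): a minimal arch driver built
 * around cpuidle_register().  All "foo" names are hypothetical.
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name  = "foo_idle",
 *		.owner = THIS_MODULE,
 *		.states[0] = {
 *			.enter            = foo_enter_lowpower,
 *			.exit_latency     = 1,
 *			.target_residency = 1,
 *			.name             = "WFI",
 *			.desc             = "Wait for interrupt",
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&foo_idle_driver, NULL);
 *
 * exit_latency and target_residency are in microseconds; passing NULL for
 * coupled_cpus skips the coupled-state setup.
 */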

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
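
/*
 * For reference, a kernel subsystem raises such a latency requirement roughly
 * as follows (sketch, not part of this file; the 20 usec value is made up):
 *
 *	static struct pm_qos_request foo_qos_req;
 *
 *	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	...
 *	pm_qos_remove_request(&foo_qos_req);
 *
 * Whenever the aggregate requirement changes, the notifier above runs and
 * kicks every CPU out of its current C-state.
 */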

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

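/* Booting with "cpuidle.off=1" on the kernel command line disables cpuidle. */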
module_param(off, int, 0444);
core_initcall(cpuidle_init);