/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/cpu_pm.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"

#include "pm.h"
#include "control.h"
#include "common.h"

#ifdef CONFIG_CPU_IDLE

/*
 * The latencies/thresholds for various C states have
 * to be configured from the respective board files.
 * These are some default values (which might not provide
 * the best power savings) used on boards which do not
 * pass these details from the board file.
 */
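/*
 * Each entry is {exit latency, target residency, valid flag}, with the
 * latencies in microseconds; the exit latency is expressed as the sum of
 * the sleep and wake-up latencies for that C-state.
 */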
static struct cpuidle_params cpuidle_params_table[] = {
	/* C1 */
	{2 + 2, 5, 1},
	/* C2 */
	{10 + 10, 30, 1},
	/* C3 */
	{50 + 50, 300, 1},
	/* C4 */
	{1500 + 1800, 4000, 1},
	/* C5 */
	{2500 + 7500, 12000, 1},
	/* C6 */
	{3000 + 8500, 15000, 1},
	/* C7 */
	{10000 + 30000, 300000, 1},
};

#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)

/* Machine-specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
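	/* Next power states programmed for the MPU and CORE power domains */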
	u32 mpu_state;
	u32 core_state;
};
struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];

struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

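/*
 * Callbacks for pwrdm_for_each_clkdm(): allow or deny hardware-supervised
 * idle transitions for every clock domain in the given power domain.
 */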
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	clkdm_allow_idle(clkdm);
	return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	clkdm_deny_idle(clkdm);
	return 0;
}

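/**
 * __omap3_enter_idle - program the MPU/CORE power domains and idle
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Programs the next power states for the MPU and CORE power domains,
 * runs the CPU PM notifiers around an MPU OFF transition so that VFP
 * context is saved and restored, and executes WFI via omap_sram_idle().
 */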
static int __omap3_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	struct omap3_idle_statedata *cx =
			cpuidle_get_statedata(&dev->states_usage[index]);
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Deny idle for C1 */
	if (index == 0) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP context is saved.
	 */
	if (mpu_state == PWRDM_POWER_OFF)
		cpu_pm_enter();

	/* Execute ARM wfi */
	omap_sram_idle();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP context.
	 */
	if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
		cpu_pm_exit();

	/* Re-allow idle for C1 */
	if (index == 0) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:

	local_fiq_enable();

	return index;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static inline int omap3_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
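	/*
	 * cpuidle_wrap_enter() measures the time spent in the state,
	 * stores it in dev->last_residency and re-enables interrupts
	 * before returning.
	 */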
	return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
}

/**
 * next_valid_state - Find next valid C-state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: Index of currently selected c-state
 *
 * If the state corresponding to index is valid, index is returned to the
 * caller. Else, this function searches for a lower c-state which is still
 * valid and returns its index.
 *
 * A state is valid if the 'valid' field is enabled and
 * if it satisfies the enable_off_mode condition.
 */
static int next_valid_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
	struct cpuidle_state *curr = &drv->states[index];
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
	u32 mpu_deepest_state = PWRDM_POWER_RET;
	u32 core_deepest_state = PWRDM_POWER_RET;
	int next_index = -1;
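	/*
	 * Power domain states are ordered numerically from deepest to
	 * shallowest (OFF < RET < ON), so a C-state is usable only when
	 * its MPU and CORE targets are no deeper than the deepest states
	 * permitted below.
	 */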

	if (enable_off_mode) {
		mpu_deepest_state = PWRDM_POWER_OFF;
		/*
		 * Erratum i583: applicable for ES rev < ES1.2 on 3630.
		 * CORE OFF mode is not supported in a stable form; restrict
		 * the CORE state to RET instead.
		 */
		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
			core_deepest_state = PWRDM_POWER_OFF;
	}

	/* Check if current state is valid */
	if ((cx->mpu_state >= mpu_deepest_state) &&
	    (cx->core_state >= core_deepest_state)) {
		return index;
	} else {
		int idx = OMAP3_NUM_STATES - 1;

		/* Reach the current state starting at highest C-state */
		for (; idx >= 0; idx--) {
			if (&drv->states[idx] == curr) {
				next_index = idx;
				break;
			}
		}

		/* Should never hit this condition */
		WARN_ON(next_index == -1);

		/*
		 * Drop to next valid state.
		 * Start search from the next (lower) state.
		 */
		idx--;
		for (; idx >= 0; idx--) {
			cx = cpuidle_get_statedata(&dev->states_usage[idx]);
			if ((cx->mpu_state >= mpu_deepest_state) &&
			    (cx->core_state >= core_deepest_state)) {
				next_index = idx;
				break;
			}
		}
		/*
		 * C1 is always valid.
		 * So, no need to check for 'next_index == -1' outside
		 * this loop.
		 */
	}

	return next_index;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: array index of target state to be programmed
 *
 * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	int new_state_idx;
	u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
	struct omap3_idle_statedata *cx;
	int ret;

	/*
	 * Prevent idle completely if CAM is active.
	 * CAM does not have wakeup capability in OMAP3.
	 */
	cam_state = pwrdm_read_pwrst(cam_pd);
	if (cam_state == PWRDM_POWER_ON) {
		new_state_idx = drv->safe_state_index;
		goto select_state;
	}

	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

	/*
	 * Prevent PER off if CORE is not in retention or off as this
	 * would disable PER wakeups completely.
	 */
	cx = cpuidle_get_statedata(&dev->states_usage[index]);
	core_next_state = cx->core_state;
	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if ((per_next_state == PWRDM_POWER_OFF) &&
	    (core_next_state > PWRDM_POWER_RET))
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);

	new_state_idx = next_valid_state(dev, drv, index);

select_state:
	ret = omap3_enter_idle(dev, drv, new_state_idx);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
}

DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

struct cpuidle_driver omap3_idle_driver = {
	.name = 	"omap3_idle",
	.owner = 	THIS_MODULE,
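	/*
	 * The state parameters below mirror the defaults in
	 * cpuidle_params_table; safe_state_index selects C1 as the
	 * fallback when deeper states must be avoided (e.g. while the
	 * CAM power domain is active).
	 */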
	.states = {
		{
			.enter		  = omap3_enter_idle,
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 300000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	.state_count = OMAP3_NUM_STATES,
	.safe_state_index = 0,
};

/* Helper to register the driver_data */
static inline struct omap3_idle_statedata *_fill_cstate_usage(
					struct cpuidle_device *dev,
					int idx)
{
	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
	struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];

	cpuidle_set_statedata(state_usage, cx);

	return cx;
}

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3-specific cpuidle driver with the cpuidle
 * framework, along with the valid set of states.
 */
int __init omap3_idle_init(void)
{
	struct cpuidle_device *dev;
	struct omap3_idle_statedata *cx;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	/* C1: MPU WFI + Core active */
	cx = _fill_cstate_usage(dev, 0);
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C2: MPU WFI + Core inactive */
	cx = _fill_cstate_usage(dev, 1);
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C3: MPU CSWR + Core inactive */
	cx = _fill_cstate_usage(dev, 2);
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_ON;

	/* C4: MPU OFF + Core inactive */
	cx = _fill_cstate_usage(dev, 3);
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_ON;

	/* C5: MPU RET + Core RET */
	cx = _fill_cstate_usage(dev, 4);
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_RET;

	/* C6: MPU OFF + Core RET */
	cx = _fill_cstate_usage(dev, 5);
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_RET;

	/* C7: MPU OFF + Core OFF */
	cx = _fill_cstate_usage(dev, 6);
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_OFF;

	cpuidle_register_driver(&omap3_idle_driver);

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
		       __func__);
		return -EIO;
	}

	return 0;
}
#else
int __init omap3_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */