/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/cpu_pm.h>
#include <asm/cpuidle.h>

#include "powerdomain.h"
#include "clockdomain.h"

#include "pm.h"
#include "control.h"
#include "common.h"

38 39
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
40 41 42
	u8 mpu_state;
	u8 core_state;
	u8 per_min_state;
43
	u8 flags;
44
};
45

46 47
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

/*
 * Possible flag bits for struct omap3_idle_statedata.flags:
 *
 * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
 *    inactive.  This in turn prevents the MPU DPLL from entering autoidle
 *    mode, so wakeup latency is greatly reduced, at the cost of additional
 *    energy consumption.  This also prevents the CORE clockdomain from
 *    entering idle.
 */
#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0)

59 60 61 62
/*
 * Prevent PER OFF if CORE is not in RETention or OFF as this would
 * disable PER wakeups completely.
 */
63
static struct omap3_idle_statedata omap3_idle_data[] = {
64 65 66
	{
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
67 68
		/* In C1 do not allow PER state lower than CORE state */
		.per_min_state = PWRDM_POWER_ON,
69
		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
70 71 72 73
	},
	{
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
74
		.per_min_state = PWRDM_POWER_RET,
75 76 77 78
	},
	{
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_ON,
79
		.per_min_state = PWRDM_POWER_RET,
80 81 82 83
	},
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_ON,
84
		.per_min_state = PWRDM_POWER_RET,
85 86 87 88
	},
	{
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_RET,
89
		.per_min_state = PWRDM_POWER_OFF,
90 91 92 93
	},
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_RET,
94
		.per_min_state = PWRDM_POWER_OFF,
95 96 97 98
	},
	{
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_OFF,
99
		.per_min_state = PWRDM_POWER_OFF,
100 101
	},
};
102

103 104 105 106 107 108 109 110 111
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv,
			    int index)
112
{
113
	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
114

115
	if (omap_irq_pending() || need_resched())
116
		goto return_sleep_time;
117

118
	/* Deny idle for C1 */
119
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
120
		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
121 122 123
	} else {
		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
		pwrdm_set_next_pwrst(core_pd, cx->core_state);
124 125
	}

126 127 128 129
	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP context is saved.
	 */
130
	if (cx->mpu_state == PWRDM_POWER_OFF)
131 132
		cpu_pm_enter();

133 134 135
	/* Execute ARM wfi */
	omap_sram_idle();

136 137 138 139
	/*
	 * Call idle CPU PM enter notifier chain to restore
	 * VFP context.
	 */
140 141
	if (cx->mpu_state == PWRDM_POWER_OFF &&
	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
142 143
		cpu_pm_exit();

144
	/* Re-allow idle for C1 */
145
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
146
		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
147

148
return_sleep_time:
149

150
	return index;
151 152
}

153
/**
154
 * next_valid_state - Find next valid C-state
155
 * @dev: cpuidle device
156
 * @drv: cpuidle driver
157
 * @index: Index of currently selected c-state
158
 *
159 160 161
 * If the state corresponding to index is valid, index is returned back
 * to the caller. Else, this function searches for a lower c-state which is
 * still valid (as defined in omap3_power_states[]) and returns its index.
162 163 164
 *
 * A state is valid if the 'valid' field is enabled and
 * if it satisfies the enable_off_mode condition.
165
 */
166
static int next_valid_state(struct cpuidle_device *dev,
167
			    struct cpuidle_driver *drv, int index)
168
{
169
	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
170 171
	u32 mpu_deepest_state = PWRDM_POWER_RET;
	u32 core_deepest_state = PWRDM_POWER_RET;
172
	int idx;
173
	int next_index = 0; /* C1 is the default value */
174 175 176 177 178 179 180 181 182 183 184

	if (enable_off_mode) {
		mpu_deepest_state = PWRDM_POWER_OFF;
		/*
		 * Erratum i583: valable for ES rev < Es1.2 on 3630.
		 * CORE OFF mode is not supported in a stable form, restrict
		 * instead the CORE state to RET.
		 */
		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
			core_deepest_state = PWRDM_POWER_OFF;
	}
185 186

	/* Check if current state is valid */
187
	if ((cx->mpu_state >= mpu_deepest_state) &&
188
	    (cx->core_state >= core_deepest_state))
189
		return index;
190

191 192 193 194 195
	/*
	 * Drop to next valid state.
	 * Start search from the next (lower) state.
	 */
	for (idx = index - 1; idx >= 0; idx--) {
196
		cx = &omap3_idle_data[idx];
197 198 199 200
		if ((cx->mpu_state >= mpu_deepest_state) &&
		    (cx->core_state >= core_deepest_state)) {
			next_index = idx;
			break;
201 202 203
		}
	}

204
	return next_index;
205 206
}

207 208 209
/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
210
 * @drv: cpuidle driver
211
 * @index: array index of target state to be programmed
212
 *
213 214
 * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
215 216
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
217
			       struct cpuidle_driver *drv,
218
			       int index)
219
{
220 221
	int new_state_idx, ret;
	u8 per_next_state, per_saved_state;
222
	struct omap3_idle_statedata *cx;
223

224
	/*
225
	 * Use only C1 if CAM is active.
226 227
	 * CAM does not have wakeup capability in OMAP3.
	 */
228
	if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
229
		new_state_idx = drv->safe_state_index;
230 231
	else
		new_state_idx = next_valid_state(dev, drv, index);
232

233 234 235 236 237 238 239 240
	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

241 242
	/* Program PER state */
	cx = &omap3_idle_data[new_state_idx];
243

244 245 246 247
	per_next_state = pwrdm_read_next_pwrst(per_pd);
	per_saved_state = per_next_state;
	if (per_next_state < cx->per_min_state) {
		per_next_state = cx->per_min_state;
248
		pwrdm_set_next_pwrst(per_pd, per_next_state);
249
	}
250

251
	ret = omap3_enter_idle(dev, drv, new_state_idx);
252 253 254 255 256 257

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
258 259
}

260
static struct cpuidle_driver omap3_idle_driver = {
261 262
	.name             = "omap3_idle",
	.owner            = THIS_MODULE,
263 264
	.states = {
		{
265
			.enter		  = omap3_enter_idle_bm,
266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 30000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
321
	.state_count = ARRAY_SIZE(omap3_idle_data),
322
	.safe_state_index = 0,
323 324
};

325 326
/* Public functions */

327 328 329
/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
330
 * Registers the OMAP3 specific cpuidle driver to the cpuidle
331 332
 * framework with the valid set of states.
 */
333
int __init omap3_idle_init(void)
334 335
{
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
336
	core_pd = pwrdm_lookup("core_pwrdm");
337 338
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");
339

340 341 342
	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
		return -ENODEV;

343
	return cpuidle_register(&omap3_idle_driver, NULL);
344
}