/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"
#include <plat/serial.h>

#include "pm.h"
#include "control.h"

#ifdef CONFIG_CPU_IDLE

#define OMAP3_MAX_STATES 7
#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core inactive */
#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */

#define OMAP3_STATE_MAX OMAP3_STATE_C7

#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */

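/*
 * Per-C-state data: the target MPU and CORE power states (PWRDM_POWER_*),
 * the entry/exit latencies and the target residency threshold, in usec.
 */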
struct omap3_processor_cx {
	u8 valid;
	u8 type;
	u32 sleep_latency;
	u32 wakeup_latency;
	u32 mpu_state;
	u32 core_state;
	u32 threshold;
	u32 flags;
};

struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
struct omap3_processor_cx current_cx_state;
struct powerdomain *mpu_pd, *core_pd, *per_pd;
struct powerdomain *cam_pd;

/*
 * The latencies/thresholds for various C states have
 * to be configured from the respective board files.
 * These are some default values (which might not provide
 * the best power savings) used on boards which do not
 * pass these details from the board file.
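 * Each entry is { valid, sleep latency, wake latency, threshold }, with the
 * latencies and threshold given in usec.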
 */
static struct cpuidle_params cpuidle_params_table[] = {
	/* C1 */
	{1, 2, 2, 5},
	/* C2 */
	{1, 10, 10, 30},
	/* C3 */
	{1, 50, 50, 300},
	/* C4 */
	{1, 1500, 1800, 4000},
	/* C5 */
	{1, 2500, 7500, 12000},
	/* C6 */
	{1, 3000, 8500, 15000},
	/* C7 */
	{1, 10000, 30000, 300000},
};

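/*
 * omap3_idle_bm_check - check whether deeper idle states are currently safe
 *
 * Returns 1 if omap3_can_sleep() reports that the system cannot sleep right
 * now (so the caller should fall back to a safer C-state), 0 otherwise.
 */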
static int omap3_idle_bm_check(void)
{
	if (!omap3_can_sleep())
		return 1;
	return 0;
}

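/*
 * Callbacks for pwrdm_for_each_clkdm(): allow or deny hardware-supervised
 * idle transitions on every clockdomain in the given powerdomain.
 */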
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	omap2_clkdm_allow_idle(clkdm);
	return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	omap2_clkdm_deny_idle(clkdm);
	return 0;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

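	/* Report the time spent idle, in microseconds, back to the cpuidle governor */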
	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}

/**
 * next_valid_state - Find next valid c-state
 * @dev: cpuidle device
 * @state: Currently selected c-state
 *
 * If the current state is valid, it is returned to the caller. Otherwise,
 * this function searches for a lower c-state which is still
 * valid (as defined in omap3_power_states[]).
 */
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
						struct cpuidle_state *curr)
{
	struct cpuidle_state *next = NULL;
	struct omap3_processor_cx *cx;

	cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);

	/* Check if current state is valid */
	if (cx->valid) {
		return curr;
	} else {
		u8 idx = OMAP3_STATE_MAX;

		/*
		 * Reach the current state starting at highest C-state
		 */
		for (; idx >= OMAP3_STATE_C1; idx--) {
			if (&dev->states[idx] == curr) {
				next = &dev->states[idx];
				break;
			}
		}

		/*
		 * Should never hit this condition.
		 */
		WARN_ON(next == NULL);

		/*
		 * Drop to next valid state.
		 * Start search from the next (lower) state.
		 */
		idx--;
		for (; idx >= OMAP3_STATE_C1; idx--) {
			struct omap3_processor_cx *cx;

			cx = cpuidle_get_statedata(&dev->states[idx]);
			if (cx->valid) {
				next = &dev->states[idx];
				break;
			}
		}
		/*
		 * C1 and C2 are always valid.
		 * So, no need to check for 'next==NULL' outside this loop.
		 */
	}

	return next;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	struct cpuidle_state *new_state = next_valid_state(dev, state);
	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
	u32 cam_state;
	struct omap3_processor_cx *cx;
	int ret;

	if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
		BUG_ON(!dev->safe_state);
		new_state = dev->safe_state;
		goto select_state;
	}

	cx = cpuidle_get_statedata(state);
	core_next_state = cx->core_state;

	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

	/*
	 * Prevent idle completely if CAM is active.
	 * CAM does not have wakeup capability in OMAP3.
	 */
	cam_state = pwrdm_read_pwrst(cam_pd);
	if (cam_state == PWRDM_POWER_ON) {
		new_state = dev->safe_state;
		goto select_state;
	}

	/*
	 * Prevent PER off if CORE is not in retention or off as this
	 * would disable PER wakeups completely.
	 */
	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if ((per_next_state == PWRDM_POWER_OFF) &&
	    (core_next_state > PWRDM_POWER_RET))
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);

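/* Enter the selected state; last_state records what was actually entered */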
select_state:
	dev->last_state = new_state;
	ret = omap3_enter_idle(dev, new_state);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
}

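/* Per-CPU cpuidle device; only the boot CPU's instance is used on (single-core) OMAP3 */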
DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

/**
 * omap3_cpuidle_update_states() - Update the cpuidle states
 * @mpu_deepest_state:	Enable states up to and including this for mpu domain
 * @core_deepest_state:	Enable states up to and including this for core domain
 *
 * Walks the list of available C-states and marks each one valid or invalid,
 * based on the deepest power state allowed for the MPU and CORE power
 * domains.
 */
void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
{
	int i;

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		struct omap3_processor_cx *cx = &omap3_power_states[i];

		if ((cx->mpu_state >= mpu_deepest_state) &&
		    (cx->core_state >= core_deepest_state)) {
			cx->valid = 1;
		} else {
			cx->valid = 0;
		}
	}
}

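/**
 * omap3_pm_init_cpuidle - override the default C-state parameters
 * @cpuidle_board_params: board-specific latency/threshold table
 *
 * Copies the board-supplied values over the defaults in
 * cpuidle_params_table[]. Does nothing if no table is passed in.
 */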
void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
{
	int i;

	if (!cpuidle_board_params)
		return;

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cpuidle_params_table[i].valid =
			cpuidle_board_params[i].valid;
		cpuidle_params_table[i].sleep_latency =
			cpuidle_board_params[i].sleep_latency;
		cpuidle_params_table[i].wake_latency =
			cpuidle_board_params[i].wake_latency;
		cpuidle_params_table[i].threshold =
			cpuidle_board_params[i].threshold;
	}
	return;
}

/* omap_init_power_states - Initialises the OMAP3 specific C states.
 *
 * Below is the description of each C state.
 *	C1 - MPU WFI + Core active
 *	C2 - MPU WFI + Core inactive
 *	C3 - MPU CSWR + Core inactive
 *	C4 - MPU OFF + Core inactive
 *	C5 - MPU CSWR + Core CSWR
 *	C6 - MPU OFF + Core CSWR
 *	C7 - MPU OFF + Core OFF
 */
void omap_init_power_states(void)
{
	/* C1 - MPU WFI + Core active */
	omap3_power_states[OMAP3_STATE_C1].valid =
			cpuidle_params_table[OMAP3_STATE_C1].valid;
	omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
	omap3_power_states[OMAP3_STATE_C1].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
	omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
	omap3_power_states[OMAP3_STATE_C1].threshold =
			cpuidle_params_table[OMAP3_STATE_C1].threshold;
	omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;

	/* C2 - MPU WFI + Core inactive */
	omap3_power_states[OMAP3_STATE_C2].valid =
			cpuidle_params_table[OMAP3_STATE_C2].valid;
	omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
	omap3_power_states[OMAP3_STATE_C2].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
	omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
	omap3_power_states[OMAP3_STATE_C2].threshold =
			cpuidle_params_table[OMAP3_STATE_C2].threshold;
	omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C3 - MPU CSWR + Core inactive */
	omap3_power_states[OMAP3_STATE_C3].valid =
			cpuidle_params_table[OMAP3_STATE_C3].valid;
	omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
	omap3_power_states[OMAP3_STATE_C3].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
	omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
	omap3_power_states[OMAP3_STATE_C3].threshold =
			cpuidle_params_table[OMAP3_STATE_C3].threshold;
	omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C4 - MPU OFF + Core inactive */
	omap3_power_states[OMAP3_STATE_C4].valid =
			cpuidle_params_table[OMAP3_STATE_C4].valid;
	omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
	omap3_power_states[OMAP3_STATE_C4].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
	omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
	omap3_power_states[OMAP3_STATE_C4].threshold =
			cpuidle_params_table[OMAP3_STATE_C4].threshold;
	omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C5 - MPU CSWR + Core CSWR */
	omap3_power_states[OMAP3_STATE_C5].valid =
			cpuidle_params_table[OMAP3_STATE_C5].valid;
	omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
	omap3_power_states[OMAP3_STATE_C5].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
	omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
	omap3_power_states[OMAP3_STATE_C5].threshold =
			cpuidle_params_table[OMAP3_STATE_C5].threshold;
	omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C6 - MPU OFF + Core CSWR */
	omap3_power_states[OMAP3_STATE_C6].valid =
			cpuidle_params_table[OMAP3_STATE_C6].valid;
	omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
	omap3_power_states[OMAP3_STATE_C6].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
	omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
	omap3_power_states[OMAP3_STATE_C6].threshold =
			cpuidle_params_table[OMAP3_STATE_C6].threshold;
	omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C7 - MPU OFF + Core OFF */
	omap3_power_states[OMAP3_STATE_C7].valid =
			cpuidle_params_table[OMAP3_STATE_C7].valid;
	omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
	omap3_power_states[OMAP3_STATE_C7].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
	omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
	omap3_power_states[OMAP3_STATE_C7].threshold =
			cpuidle_params_table[OMAP3_STATE_C7].threshold;
	omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/*
	 * Erratum i583: on 3630 silicon prior to ES1.2, OFF mode cannot be
	 * enabled in a stable form, so the C7 state is disabled on those
	 * revisions.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
		omap3_power_states[OMAP3_STATE_C7].valid = 0;
		cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
		pr_warn("%s: core off state C7 disabled due to i583\n",
				__func__);
	}
}

struct cpuidle_driver omap3_idle_driver = {
	.name = 	"omap3_idle",
	.owner = 	THIS_MODULE,
};

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
	int i, count = 0;
	struct omap3_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	omap_init_power_states();
	cpuidle_register_driver(&omap3_idle_driver);

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

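	/* Register only the C-states marked valid; C1 doubles as the safe fallback state */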
	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cx = &omap3_power_states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;
		cpuidle_set_statedata(state, cx);
		state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
		state->target_residency = cx->threshold;
		state->flags = cx->flags;
		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
			omap3_enter_idle_bm : omap3_enter_idle;
		if (cx->type == OMAP3_STATE_C1)
			dev->safe_state = state;
		sprintf(state->name, "C%d", count+1);
		count++;
	}

	if (!count)
		return -EINVAL;
	dev->state_count = count;

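	/* The deepest allowed C-state depends on whether OFF mode is enabled system-wide */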
	if (enable_off_mode)
		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
	else
		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
		       __func__);
		return -EIO;
	}

	return 0;
}
#else
int __init omap3_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */