/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>

#include <plat/sram.h>
#include "clockdomain.h"
#include <plat/powerdomain.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>

#include <asm/tlbflush.h>

#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

#include "prm2xxx_3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "control.h"

#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
{
	return (suspend_state != PM_SUSPEND_ON);
}
#else
static inline bool is_suspending(void)
{
	return false;
}
#endif

/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET	   0xc4
#define OMAP343X_TABLE_VALUE_OFFSET	   0xc0
#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

static void (*_omap_sram_idle)(u32 *addr, int save_state);

static int (*_omap_save_secure_sram)(u32 *addr);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;

static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}

static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}

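/*
 * Enable the I/O pad wake-up daisy chain (ES3.1 and later): set
 * EN_IO_CHAIN and poll ST_IO_CHAIN until the chain reports that the
 * pad wake-up state has been latched.
 */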
static void omap3_enable_io_chain(void)
{
	int timeout = 0;

	if (omap_rev() >= OMAP3430_REV_ES3_1) {
		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
					   PM_WKEN);
		/* Do a readback to assure write has been done */
		omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);

		while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
			 OMAP3430_ST_IO_CHAIN_MASK)) {
			timeout++;
			if (timeout > 1000) {
				printk(KERN_ERR "Wake up daisy chain "
				       "activation failed.\n");
				return;
			}
			omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
						   WKUP_MOD, PM_WKEN);
		}
	}
}

static void omap3_disable_io_chain(void)
{
	if (omap_rev() >= OMAP3430_REV_ES3_1)
		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
					     PM_WKEN);
}

static void omap3_core_save_context(void)
{
	u32 control_padconf_off;

	/* Save the padconf registers */
	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	control_padconf_off |= START_PADCONF_SAVE;
	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
			& PADCONF_SAVE_DONE))
		udelay(1);

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context, padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
	u32 ret;

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = _omap_save_secure_sram((u32 *)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			printk(KERN_ERR "save_secure_sram() returns %08x\n",
				ret);
			while (1)
				;
		}
	}
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	wkst = omap2_prm_read_mod_reg(module, wkst_off);
	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
		iclk = omap2_cm_read_mod_reg(module, iclk_off);
		fclk = omap2_cm_read_mod_reg(module, fclk_off);
		while (wkst) {
			clken = wkst;
			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
			omap2_prm_write_mod_reg(wkst, module, wkst_off);
			wkst = omap2_prm_read_mod_reg(module, wkst_off);
			c++;
		}
		omap2_cm_write_mod_reg(iclk, module, iclk_off);
		omap2_cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}

static int _prcm_int_handle_wakeup(void)
{
	int c;

	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
	c += prcm_clear_mod_irqs(CORE_MOD, 1);
	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += prcm_clear_mod_irqs(CORE_MOD, 3);
		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
	}

	return c;
}

/*
 * PRCM Interrupt Handler
 *
 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
 * interrupts from the PRCM for the MPU. These bits must be cleared in
 * order to clear the PRCM interrupt. The PRCM interrupt handler is
 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
 * register indicates that a wake-up event is pending for the MPU and
 * this bit can only be cleared if the all the wake-up events latched
 * in the various PM_WKST_x registers have been cleared. The interrupt
 * handler is implemented using a do-while loop so that if a wake-up
 * event occurred during the processing of the prcm interrupt handler
 * (setting a bit in the corresponding PM_WKST_x register and thus
 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
 * this would be handled.
 */
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
{
	u32 irqenable_mpu, irqstatus_mpu;
	int c = 0;

	irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
	irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
	irqstatus_mpu &= irqenable_mpu;

	do {
		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
				     OMAP3430_IO_ST_MASK)) {
			c = _prcm_int_handle_wakeup();

			/*
			 * Is the MPU PRCM interrupt handler racing with the
			 * IVA2 PRCM interrupt handler ?
			 */
			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
			     "but no wakeup sources are marked\n");
		} else {
			/* XXX we need to expand our PRCM interrupt handler */
			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
			     "no code to handle it (%08x)\n", irqstatus_mpu);
		}

		omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

		irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
		irqstatus_mpu &= irqenable_mpu;

	} while (irqstatus_mpu);

	return IRQ_HANDLED;
}

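/*
 * Write a previously saved value back into the ARM CP15 c1 System
 * Control Register (re-enables the MMU/cache/prediction state that was
 * captured before entering off-mode).
 */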
static void restore_control_register(u32 val)
{
	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}

/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
	void __iomem *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

	/* Get address of entry that was modified */
	address = (u32 *)__raw_readl(scratchpad_address +
				     OMAP343X_TABLE_ADDRESS_OFFSET);
	/* Get the previous value which needs to be restored */
	previous_value = __raw_readl(scratchpad_address +
				     OMAP343X_TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	flush_tlb_all();
	control_reg_value = __raw_readl(scratchpad_address
					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
	/* This will enable caches and prediction */
	restore_control_register(control_reg_value);
}

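/*
 * omap_sram_idle - program the next power states, save any context
 * that will be lost, jump to the CPU suspend code that was pushed to
 * SRAM, then restore context and wake-up settings on the way back out.
 */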
void omap_sram_idle(void)
{
	/*
	 * save_state tells what needs to be saved and restored
	 * in omap_sram_idle:
	 *	0 => nothing to save or restore
	 *	1 => only L1 and logic lost
	 *	2 => only L2 lost
	 *	3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;

	if (!_omap_sram_idle)
		return;

	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}
	pwrdm_pre_transition();

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_enable_io_chain();
	}

	/* Block console output in case it is on one of the OMAP UARTs */
	if (!is_suspending())
		if (per_next_state < PWRDM_POWER_ON ||
		    core_next_state < PWRDM_POWER_ON)
			if (try_acquire_console_sem())
				goto console_still_active;

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(2);
		omap_uart_prepare_idle(3);
		omap2_gpio_prepare_for_idle(per_next_state);
		if (per_next_state == PWRDM_POWER_OFF)
			omap3_per_save_context();
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(0);
		omap_uart_prepare_idle(1);
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value from
	 * the scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where ARM registers
	 * get saved. The restore path then reads from this
	 * location and restores them back.
	 */
	_omap_sram_idle(omap3_arm_context, save_state);
	cpu_init();

	/* Restore normal SDRC POWER settings */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* Restore table entry modified during MMU restoration */
	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
		restore_table_entry();

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_cm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		omap_uart_resume_idle(0);
		omap_uart_resume_idle(1);
		if (core_next_state == PWRDM_POWER_OFF)
			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
		omap_uart_resume_idle(2);
		omap_uart_resume_idle(3);
	}

	if (!is_suspending())
		release_console_sem();

console_still_active:
	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
					     PM_WKEN);
		omap3_disable_io_chain();
	}

	pwrdm_post_transition();

	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}

int omap3_can_sleep(void)
{
	if (!sleep_while_idle)
		return 0;
	if (!omap_uart_can_sleep())
		return 0;
	return 1;
}

static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (!omap3_can_sleep())
		goto out;

	if (omap_irq_pending() || need_resched())
		goto out;

	omap_sram_idle();

out:
	local_fiq_enable();
	local_irq_enable();
}

#ifdef CONFIG_SUSPEND
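/*
 * omap3_pm_suspend - program each powerdomain's suspend target state,
 * idle the SoC once via omap_sram_idle(), then restore the saved
 * next-power-state settings and report any domain that missed its
 * target.
 */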
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
					 wakeup_timer_milliseconds);

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap_uart_prepare_suspend();
	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			printk(KERN_INFO "Powerdomain (%s) didn't enter "
			       "target state %d\n",
			       pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
	else
		printk(KERN_INFO "Successfully put all powerdomains "
		       "to target state\n");

	return ret;
}

static int omap3_pm_enter(suspend_state_t unused)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap3_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
	disable_hlt();
	suspend_state = state;
	omap_uart_enable_irqs(0);
	return 0;
}

static void omap3_pm_end(void)
{
	suspend_state = PM_SUSPEND_ON;
	omap_uart_enable_irqs(1);
	enable_hlt();
	return;
}

static struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.enter		= omap3_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */

/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *                   retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle.  This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			 OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2 */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

static void __init omap3_d2d_idle(void)
{
	u16 mask, padconf;

	/*
	 * In a standalone OMAP3430 with no stacked modem, the D2D Idle
	 * Ack and D2D MStandby pads must be pulled high.  Set
	 * CONTROL_PADCONF_SAD2D_IDLEACK and CONTROL_PADCONF_SAD2D_MSTDBY
	 * to have a pull up.
	 */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

	/* reset modem */
	omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
			  CORE_MOD, OMAP2_RM_RSTCTRL);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

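/*
 * One-time PRM/CM setup at boot: clear all wake-up dependencies,
 * enable interface-clock and DPLL autoidle for every module, and
 * program the wake-up sources and MPU group selections used by the
 * idle and suspend paths.
 */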
static void __init prcm_setup_regs(void)
{
	u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_AUTO_UART4_MASK : 0;
	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_EN_UART4_MASK : 0;
	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_GRPSEL_UART4_MASK : 0;

	/* XXX Reset all wkdeps. This should be done when initializing
	 * powerdomains */
	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
		omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
	} else
		omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

	/*
	 * Enable interface clock autoidle for all modules.
	 * Note that in the long run this should be done by clockfw
	 */
	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_MODEM_MASK |
		OMAP3430ES2_AUTO_MMC3_MASK |
		OMAP3430ES2_AUTO_ICR_MASK |
		OMAP3430_AUTO_AES2_MASK |
		OMAP3430_AUTO_SHA12_MASK |
		OMAP3430_AUTO_DES2_MASK |
		OMAP3430_AUTO_MMC2_MASK |
		OMAP3430_AUTO_MMC1_MASK |
		OMAP3430_AUTO_MSPRO_MASK |
		OMAP3430_AUTO_HDQ_MASK |
		OMAP3430_AUTO_MCSPI4_MASK |
		OMAP3430_AUTO_MCSPI3_MASK |
		OMAP3430_AUTO_MCSPI2_MASK |
		OMAP3430_AUTO_MCSPI1_MASK |
		OMAP3430_AUTO_I2C3_MASK |
		OMAP3430_AUTO_I2C2_MASK |
		OMAP3430_AUTO_I2C1_MASK |
		OMAP3430_AUTO_UART2_MASK |
		OMAP3430_AUTO_UART1_MASK |
		OMAP3430_AUTO_GPT11_MASK |
		OMAP3430_AUTO_GPT10_MASK |
		OMAP3430_AUTO_MCBSP5_MASK |
		OMAP3430_AUTO_MCBSP1_MASK |
		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
		OMAP3430_AUTO_MAILBOXES_MASK |
		OMAP3430_AUTO_OMAPCTRL_MASK |
		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
		OMAP3430_AUTO_HSOTGUSB_MASK |
		OMAP3430_AUTO_SAD2D_MASK |
		OMAP3430_AUTO_SSI_MASK,
		CORE_MOD, CM_AUTOIDLE1);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_PKA_MASK |
		OMAP3430_AUTO_AES1_MASK |
		OMAP3430_AUTO_RNG_MASK |
		OMAP3430_AUTO_SHA11_MASK |
		OMAP3430_AUTO_DES1_MASK,
		CORE_MOD, CM_AUTOIDLE2);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_cm_write_mod_reg(
			OMAP3430_AUTO_MAD2D_MASK |
			OMAP3430ES2_AUTO_USBTLL_MASK,
			CORE_MOD, CM_AUTOIDLE3);
	}

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_WDT2_MASK |
		OMAP3430_AUTO_WDT1_MASK |
		OMAP3430_AUTO_GPIO1_MASK |
		OMAP3430_AUTO_32KSYNC_MASK |
		OMAP3430_AUTO_GPT12_MASK |
		OMAP3430_AUTO_GPT1_MASK,
		WKUP_MOD, CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_DSS_MASK,
		OMAP3430_DSS_MOD,
		CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_CAM_MASK,
		OMAP3430_CAM_MOD,
		CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		omap3630_auto_uart4_mask |
		OMAP3430_AUTO_GPIO6_MASK |
		OMAP3430_AUTO_GPIO5_MASK |
		OMAP3430_AUTO_GPIO4_MASK |
		OMAP3430_AUTO_GPIO3_MASK |
		OMAP3430_AUTO_GPIO2_MASK |
		OMAP3430_AUTO_WDT3_MASK |
		OMAP3430_AUTO_UART3_MASK |
		OMAP3430_AUTO_GPT9_MASK |
		OMAP3430_AUTO_GPT8_MASK |
		OMAP3430_AUTO_GPT7_MASK |
		OMAP3430_AUTO_GPT6_MASK |
		OMAP3430_AUTO_GPT5_MASK |
		OMAP3430_AUTO_GPT4_MASK |
		OMAP3430_AUTO_GPT3_MASK |
		OMAP3430_AUTO_GPT2_MASK |
		OMAP3430_AUTO_MCBSP4_MASK |
		OMAP3430_AUTO_MCBSP3_MASK |
		OMAP3430_AUTO_MCBSP2_MASK,
		OMAP3430_PER_MOD,
		CM_AUTOIDLE);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_cm_write_mod_reg(
			OMAP3430ES2_AUTO_USBHOST_MASK,
			OMAP3430ES2_USBHOST_MOD,
			CM_AUTOIDLE);
	}

	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Set all plls to autoidle. This is needed until autoidle is
	 * enabled by clockfw
	 */
	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
			 MPU_MOD,
			 CM_AUTOIDLE2);
	omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
			 PLL_MOD,
			 CM_AUTOIDLE);
	omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
			 PLL_MOD,
			 CM_AUTOIDLE2);

	/*
	 * Enable control of the external oscillator through
	 * sys_clkreq. In the long run clock framework should
	 * take care of this.
	 */
	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
			     OMAP3430_GR_MOD,
			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* setup wakeup source */
	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
			  WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
			  OMAP3430_GRPSEL_GPT1_MASK |
			  OMAP3430_GRPSEL_GPT12_MASK,
			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
	/* For some reason IO doesn't generate a wakeup event even if
	 * it is selected to the mpu wakeup group */
	omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

	/* Enable PM_WKEN to support DSS LPR */
	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
				OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
			  OMAP3430_EN_MCBSP4_MASK,
			  OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
			  OMAP3430_GRPSEL_GPIO2_MASK |
			  OMAP3430_GRPSEL_GPIO3_MASK |
			  OMAP3430_GRPSEL_GPIO4_MASK |
			  OMAP3430_GRPSEL_GPIO5_MASK |
			  OMAP3430_GRPSEL_GPIO6_MASK |
			  OMAP3430_GRPSEL_UART3_MASK |
			  OMAP3430_GRPSEL_MCBSP2_MASK |
			  OMAP3430_GRPSEL_MCBSP3_MASK |
			  OMAP3430_GRPSEL_MCBSP4_MASK,
			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags */
	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}

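/*
 * omap3_pm_off_mode_enable - program OFF (or RETENTION) as the default
 * next power state for every registered powerdomain; erratum i583 keeps
 * the CORE domain out of OFF on early OMAP3630 revisions.
 */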
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

#ifdef CONFIG_CPU_IDLE
	/*
	 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
	 * enable OFF mode in a stable form for previous revisions, so
	 * restrict to RET instead.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
		omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
	else
		omap3_cpuidle_update_states(state, state);
#endif

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
				pwrst->pwrdm == core_pwrdm &&
				state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			WARN_ONCE(1,
				"%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

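/*
 * pwrdms_setup - called once per registered powerdomain at init:
 * default every domain that supports programmable power states to
 * RETENTION and enable hardware save-and-restore where available.
 */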
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Enable hw supervised mode for all clockdomains if it's
 * supported. Initiate sleep transition for other clockdomains, if
 * they are not used
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		omap2_clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		omap2_clkdm_sleep(clkdm);
	return 0;
}

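/*
 * Copy the low-level CPU suspend code (and, on HS/EMU devices, the
 * secure RAM save stub) into on-chip SRAM so it can run while SDRAM
 * is in self-refresh.
 */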
void omap_push_sram_idle(void)
{
	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
					omap34xx_cpu_suspend_sz);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

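/*
 * Record which PM errata apply to this silicon revision so that the
 * idle and off-mode paths can apply the matching workarounds.
 */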
static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
	}
}

static int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
	int ret;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	pm_errata_configure();

	printk(KERN_INFO "Power Management for TI OMAP3.\n");

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
			  (irq_handler_t)prcm_interrupt_handler,
			  IRQF_DISABLED, "prcm", NULL);
	if (ret) {
		printk(KERN_ERR "request_irq failed to register for 0x%x\n",
		       INT_34XX_PRCM_MPU_IRQ);
		goto err1;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		printk(KERN_ERR "Failed to setup powerdomains\n");
		goto err2;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
		goto err2;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");
	cam_pwrdm = pwrdm_lookup("cam_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	core_clkdm = clkdm_lookup("core_clkdm");

	omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608
	 * it is safer to disable RTA by the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			printk(KERN_ERR "Memory allocation failed when "
					"allocating for secure sram context\n");

		local_irq_disable();
		local_fiq_disable();

		omap_dma_global_context_save();
		omap3_save_secure_ram_context(PWRDM_POWER_ON);
		omap_dma_global_context_restore();

		local_irq_enable();
		local_fiq_enable();
	}

	omap3_save_scratchpad_contents();
err1:
	return ret;
err2:
	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	return ret;
}

late_initcall(omap3_pm_init);