/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with ARM GIC to wake the CPU out from low power states on
 * external interrupts. It is responsible for generating the wakeup
 * event from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to
 * the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq %= 32;

	return 0;
}
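
/*
 * Worked example (editor's sketch, not part of the driver): SPI 37 maps
 * to register bank 37 >> 5 = 1 and bit 37 % 32 = 5, i.e. its enable bit
 * sits at wakeupgen_base + OMAP_WKG_ENB_A_0 + (cpu * CPU_ENA_OFFSET) + 4.
 * The hypothetical helper below only shows how the static helpers above
 * compose; it is deliberately kept out of the build.
 */
#if 0
static void wakeupgen_example_unmask_spi37(unsigned int cpu)
{
	u32 val, bit_number;
	u8 bank;

	if (_wakeupgen_get_irq_info(37, &bit_number, &bank))
		return;

	val = wakeupgen_readl(bank, cpu);	/* read enable bank 1 for 'cpu' */
	val |= BIT(bit_number);			/* set bit 5 (SPI 37) */
	wakeupgen_writel(val, bank, cpu);	/* write the mask back */
}
#endif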

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

/*
 * The sys_nirq pins bypass peripheral modules and are wired directly
 * to MPUSS wakeupgen. They get automatically inverted for GIC.
 */
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
{
	bool inverted = false;

	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		type &= ~IRQ_TYPE_LEVEL_MASK;
		type |= IRQ_TYPE_LEVEL_HIGH;
		inverted = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type &= ~IRQ_TYPE_EDGE_BOTH;
		type |= IRQ_TYPE_EDGE_RISING;
		inverted = true;
		break;
	default:
		break;
	}

	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
			d->hwirq);

	return irq_chip_set_type_parent(d, type);
}
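
/*
 * Editor's note (illustrative, hedged): a board DTS that routes a PMIC
 * interrupt through sys_nirq1 typically describes the line as active
 * low, e.g.
 *
 *	interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 *
 * The hook above rewrites LEVEL_LOW as LEVEL_HIGH (and EDGE_FALLING as
 * EDGE_RISING) before handing the type to the parent GIC, matching the
 * hardware inversion of the sys_nirq pins. For any hwirq other than 7
 * or 119 (sys_nirq1/sys_nirq2) it additionally warns, since an inverted
 * polarity there most likely indicates a DTS mistake.
 */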

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	set = 1: save the current masks and mask all interrupts on 'cpu'
 *	set = 0: unmask all interrupts and restore the saved masks on 'cpu'
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

static inline void am43xx_irq_save_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++) {
		wakeupgen_context[i] = wakeupgen_readl(i, 0);
		wakeupgen_writel(0, i, CPU0_ID);
	}
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				FLAG_START_CRITICAL,
				0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
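
/*
 * Editor's note: wakeupgen_chip sits on top of the GIC in a hierarchical
 * irqchip stack. mask/unmask first update the WakeupGen enable bits and
 * then call into the parent, set_type fixes up the polarity of the
 * inverted sys_nirq lines before forwarding, and the remaining
 * operations (eoi, retrigger, affinity) are simply handed to the parent
 * GIC through the irq_chip_*_parent()/hierarchy helpers.
 */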

static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}
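
/*
 * Example (editor's sketch, hedged): a peripheral that hangs off this
 * domain uses the same three GIC cells, e.g.
 *
 *	interrupt-parent = <&wakeupgen>;
 *	interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 *
 * wakeupgen_domain_translate() then reports hwirq 32 with a level-high
 * trigger, and wakeupgen_domain_alloc() installs wakeupgen_chip for the
 * new virq before re-issuing the unchanged fwspec to the parent GIC
 * domain. GIC_SPI 32 is only an illustrative number, not a specific
 * OMAP peripheral.
 */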

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has the necessary hooks in place.
	 */

	/* Associate all the IRQs to the boot CPU, like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0:	ES1 behavior, CPU cores would enter and exit OFF mode together.
	 * 0x1:	ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 * independently.
	 * This needs to be set only once, since the WakeupGen sits in the
	 * always-ON power domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	sar_base = omap4_get_sar_ram_base();

	return 0;
}

IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
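
/*
 * Editor's note: wakeupgen_init() is probed for the node carrying the
 * "ti,omap4-wugen-mpu" compatible. A typical node (illustrative, based
 * on the omap4/omap5 dtsi layout; the unit address and size may differ
 * per SoC) looks like:
 *
 *	wakeupgen: interrupt-controller@48281000 {
 *		compatible = "ti,omap4-wugen-mpu";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x48281000 0x1000>;
 *		interrupt-parent = <&gic>;
 *	};
 */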