/*
 * linux/arch/arm/mach-omap2/irq.c
 *
 * Interrupt handler for OMAP2 boards.
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/irq-omap-intc.h>

/* selected INTC register offsets */

#define INTC_REVISION		0x0000
#define INTC_SYSCONFIG		0x0010
#define INTC_SYSSTATUS		0x0014
33
#define INTC_SIR		0x0040
34
#define INTC_CONTROL		0x0048
35 36 37 38
#define INTC_PROTECTION		0x004C
#define INTC_IDLE		0x0050
#define INTC_THRESHOLD		0x0068
#define INTC_MIR0		0x0084
39 40 41
#define INTC_MIR_CLEAR0		0x0088
#define INTC_MIR_SET0		0x008c
#define INTC_PENDING_IRQ0	0x0098
42 43 44
#define INTC_PENDING_IRQ1	0x00b8
#define INTC_PENDING_IRQ2	0x00d8
#define INTC_PENDING_IRQ3	0x00f8
45
#define INTC_ILR0		0x0100
46

47
#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
48
#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
49
#define INTCPS_NR_ILR_REGS	128
50
#define INTCPS_NR_MIR_REGS	4
51

52 53 54
#define INTC_IDLE_FUNCIDLE	(1 << 0)
#define INTC_IDLE_TURBO		(1 << 1)

55 56
#define INTC_PROTECTION_ENABLE	(1 << 0)

57
struct omap_intc_regs {
58 59 60 61
	u32 sysconfig;
	u32 protection;
	u32 idle;
	u32 threshold;
62
	u32 ilr[INTCPS_NR_ILR_REGS];
63 64
	u32 mir[INTCPS_NR_MIR_REGS];
};
65 66 67 68
static struct omap_intc_regs intc_context;

static struct irq_domain *domain;
static void __iomem *omap_irq_base;
69 70
static int omap_nr_pending;
static int omap_nr_irqs;
71

72
static void intc_writel(u32 reg, u32 val)
73
{
74
	writel_relaxed(val, omap_irq_base + reg);
75 76
}

77
static u32 intc_readl(u32 reg)
78
{
79
	return readl_relaxed(omap_irq_base + reg);
80 81
}

/*
 * Snapshot the INTC configuration (sysconfig, protection, idle,
 * threshold, all ILR priority/routing registers and the MIR masks)
 * into intc_context so it can be restored after a power transition.
 */
void omap_intc_save_context(void)
{
	int n;

	intc_context.sysconfig = intc_readl(INTC_SYSCONFIG);
	intc_context.protection = intc_readl(INTC_PROTECTION);
	intc_context.idle = intc_readl(INTC_IDLE);
	intc_context.threshold = intc_readl(INTC_THRESHOLD);

	for (n = 0; n < omap_nr_irqs; n++)
		intc_context.ilr[n] = intc_readl(INTC_ILR0 + 0x4 * n);

	for (n = 0; n < INTCPS_NR_MIR_REGS; n++)
		intc_context.mir[n] = intc_readl(INTC_MIR0 + 0x20 * n);
}

/*
 * Write back the INTC configuration previously captured by
 * omap_intc_save_context().
 */
void omap_intc_restore_context(void)
{
	int n;

	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
	intc_writel(INTC_PROTECTION, intc_context.protection);
	intc_writel(INTC_IDLE, intc_context.idle);
	intc_writel(INTC_THRESHOLD, intc_context.threshold);

	for (n = 0; n < omap_nr_irqs; n++)
		intc_writel(INTC_ILR0 + 0x4 * n, intc_context.ilr[n]);

	for (n = 0; n < INTCPS_NR_MIR_REGS; n++)
		intc_writel(INTC_MIR0 + 0x20 * n, intc_context.mir[n]);
	/* MIRs are saved and restored with other PRCM registers */
}

void omap3_intc_prepare_idle(void)
{
	/*
	 * Disable autoidle as it can stall interrupt controller,
	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
	 */
	intc_writel(INTC_SYSCONFIG, 0);
129
	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
130 131 132 133 134 135
}

void omap3_intc_resume_idle(void)
{
	/* Re-enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1);
136
	intc_writel(INTC_IDLE, 0);
137 138
}

/* XXX: FIQ and additional INTC support (only MPU at the moment) */
static void omap_ack_irq(struct irq_data *d)
141
{
142
	intc_writel(INTC_CONTROL, 0x1);
143 144
}

/* Mask the interrupt via the generic chip, then acknowledge it. */
static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}

151
static void __init omap_irq_soft_reset(void)
152 153 154
{
	unsigned long tmp;

155
	tmp = intc_readl(INTC_REVISION) & 0xff;
156

P
Paul Walmsley 已提交
157
	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
158
		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);
159

160
	tmp = intc_readl(INTC_SYSCONFIG);
161
	tmp |= 1 << 1;	/* soft reset */
162
	intc_writel(INTC_SYSCONFIG, tmp);
163

164
	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
165
		/* Wait for reset to complete */;
166 167

	/* Enable autoidle */
168
	intc_writel(INTC_SYSCONFIG, 1 << 0);
169 170
}

171 172
int omap_irq_pending(void)
{
173
	int i;
174

175 176
	for (i = 0; i < omap_nr_pending; i++)
		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
177
			return 1;
178 179 180
	return 0;
}

181 182 183 184 185 186
void omap3_intc_suspend(void)
{
	/* A pending interrupt would prevent OMAP from entering suspend */
	omap_ack_irq(NULL);
}

187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}

/*
 * Allocate and configure a generic irq chip for one 32-interrupt bank
 * in the legacy (non-DT) probe path.  @irq_start is the Linux IRQ of
 * the bank's first line, @num the number of lines to set up.
 */
static void __init omap_alloc_gc_legacy(void __iomem *base,
		unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
			handle_level_irq);
	if (!gc) {
		/* Allocation failure would otherwise oops on the deref below */
		pr_warn("Failed to allocate irq chip for IRQ %u\n", irq_start);
		return;
	}
	ct = gc->chip_types;
	ct->chip.irq_ack = omap_mask_ack_irq;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

	ct->regs.enable = INTC_MIR_CLEAR0;
	ct->regs.disable = INTC_MIR_SET0;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262
static int __init omap_init_irq_of(struct device_node *node)
{
	int ret;

	omap_irq_base = of_iomap(node, 0);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	domain = irq_domain_add_linear(node, omap_nr_irqs,
			&irq_generic_chip_ops, NULL);

	omap_irq_soft_reset();

	ret = omap_alloc_gc_of(domain, omap_irq_base);
	if (ret < 0)
		irq_domain_remove(domain);

	return ret;
}

263
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
264
{
265
	int j, irq_base;
266

267 268
	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
269
		return -ENOMEM;
270

271
	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
272 273 274 275 276
	if (irq_base < 0) {
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

277
	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
278
			&irq_domain_simple_ops, NULL);
279

280
	omap_irq_soft_reset();
281

282
	for (j = 0; j < omap_nr_irqs; j += 32)
283 284 285 286 287
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}

/* Set the PROTECTION enable bit in the INTC protection register. */
static void __init omap_irq_enable_protection(void)
{
	intc_writel(INTC_PROTECTION,
		    intc_readl(INTC_PROTECTION) | INTC_PROTECTION_ENABLE);
}

297 298
static int __init omap_init_irq(u32 base, struct device_node *node)
{
299 300
	int ret;

301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316
	/*
	 * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
	 * depends is still not ready for linear IRQ domains; because of that
	 * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
	 * linear IRQ Domain until that driver is finally fixed.
	 */
	if (of_device_is_compatible(node, "ti,omap2-intc") ||
			of_device_is_compatible(node, "ti,omap3-intc")) {
		struct resource res;

		if (of_address_to_resource(node, 0, &res))
			return -ENOMEM;

		base = res.start;
		ret = omap_init_irq_legacy(base, node);
	} else if (node) {
317
		ret = omap_init_irq_of(node);
318 319 320
	} else {
		ret = omap_init_irq_legacy(base, NULL);
	}
321 322 323 324 325

	if (ret == 0)
		omap_irq_enable_protection();

	return ret;
326 327
}

328 329
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
330
{
331
	extern unsigned long irq_err_count;
332
	u32 irqnr;
333

334
	irqnr = intc_readl(INTC_SIR);
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358

	/*
	 * A spurious IRQ can result if interrupt that triggered the
	 * sorting is no longer active during the sorting (10 INTC
	 * functional clock cycles after interrupt assertion). Or a
	 * change in interrupt mask affected the result during sorting
	 * time. There is no special handling required except ignoring
	 * the SIR register value just read and retrying.
	 * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
	 *
	 * Many a times, a spurious interrupt situation has been fixed
	 * by adding a flush for the posted write acking the IRQ in
	 * the device driver. Typically, this is going be the device
	 * driver whose interrupt was handled just before the spurious
	 * IRQ occurred. Pay attention to those device drivers if you
	 * run into hitting the spurious IRQ condition below.
	 */
	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		irq_err_count++;
		omap_ack_irq(NULL);
		return;
	}

359 360
	irqnr &= ACTIVEIRQ_MASK;
	handle_domain_irq(domain, irqnr, regs);
361 362
}

363
static int __init intc_of_init(struct device_node *node,
364 365
			     struct device_node *parent)
{
366
	int ret;
367

368
	omap_nr_pending = 3;
369
	omap_nr_irqs = 96;
370 371 372 373

	if (WARN_ON(!node))
		return -ENODEV;

374 375 376
	if (of_device_is_compatible(node, "ti,dm814-intc") ||
	    of_device_is_compatible(node, "ti,dm816-intc") ||
	    of_device_is_compatible(node, "ti,am33xx-intc")) {
377
		omap_nr_irqs = 128;
378 379
		omap_nr_pending = 4;
	}
380

381 382 383
	ret = omap_init_irq(-1, of_node_get(node));
	if (ret < 0)
		return ret;
384

385
	set_handle_irq(omap_intc_handle_irq);
386

387 388 389
	return 0;
}

IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);