/*
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

#define ltq_icu_w32(m, x, y)	ltq_w32((x), ltq_icu_membase[m] + (y))
#define ltq_icu_r32(m, x)	ltq_r32(ltq_icu_membase[m] + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* our 2 ipi interrupts for VSMP */
#define MIPS_CPU_IPI_RESCHED_IRQ	0
#define MIPS_CPU_IPI_CALL_IRQ		1

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

#ifdef CONFIG_MIPS_MT_SMP
int gic_present;
#endif

static int exin_avail;
static struct resource ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;

int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin].start;
	return -1;
}

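/*
 * ICU hwirqs start at MIPS_CPU_IRQ_CASCADE and each interrupt module (IM)
 * serves INT_NUM_IM_OFFSET lines, so the handlers below split a hwirq into
 * a module index and a bit offset within that module's register bank.
 */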
void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
	ltq_icu_w32(im, BIT(offset), isr);
}

static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, BIT(offset), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}

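/*
 * Every external interrupt line owns a 4 bit field in the EXIN_C register;
 * writing 1/2/3 selects rising/falling/both-edge triggering, 5/6 selects
 * high/low level triggering.
 */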
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;

	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(val << (i * 4)), LTQ_EIU_EXIN_C);
		}
	}

	return 0;
}

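/*
 * EIU irqs need extra work on startup/shutdown: besides the normal ICU
 * enable/disable, the line is set to its default low level trigger, any
 * pending state is cleared via EXIN_INC and the line is gated via EXIN_INEN.
 */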
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

static struct irq_chip ltq_irq_type = {
	"icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static struct irq_chip ltq_eiu_type = {
	"eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
};

static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));

	/* if this is a EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

#define DEFINE_HWx_IRQDISPATCH(x)					\
	static void ltq_hw ## x ## _irqdispatch(void)			\
	{								\
		ltq_hw_irqdispatch(x);					\
	}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

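/*
 * If the CPU timer interrupt sits on the last hardware line (IP7) it is
 * dispatched directly, otherwise hw5 is just another cascaded ICU module.
 */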
#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif

#ifdef CONFIG_MIPS_MT_SMP
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
	setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

static void ltq_sw0_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}

static void ltq_sw1_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI_resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI_call"
};
#endif

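/*
 * Non-vectored dispatch entry: service the CPU timer (IP7) first, then scan
 * the lines starting at IP2 that carry the cascaded ICU interrupt modules.
 */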
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	unsigned int i;

	if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		goto out;
	} else {
		for (i = 0; i < MAX_IM; i++) {
			if (pending & (CAUSEF_IP2 << i)) {
				ltq_hw_irqdispatch(i);
				goto out;
			}
		}
	}
	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause());

out:
	return;
}

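/*
 * hwirqs below MIPS_CPU_IRQ_CASCADE belong to the MIPS CPU interrupt
 * controller and are left alone; external (EIU) lines get the eiu chip so
 * trigger types can be configured, everything else uses the plain icu chip.
 */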
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i].start)
			chip = &ltq_eiu_type;

	irq_set_chip_and_handler(hw, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};

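/*
 * The ICU module outputs are cascaded into the CPU interrupt lines starting
 * at 2; they only need a placeholder action, the actual demultiplexing is
 * done in ltq_hw_irqdispatch().
 */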
static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
};

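/*
 * Map the ICU (and, if present, EIU) register ranges from the devicetree,
 * mask and ack all irqs, register the cascade lines and the irq domain and
 * finally unmask the relevant lines in the CP0 status register.
 */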
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret;

	for (i = 0; i < MAX_IM; i++) {
		if (of_address_to_resource(node, i, &res))
			panic("Failed to get icu memory range");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu memory\n");

		ltq_icu_membase[i] = ioremap_nocache(res.start,
					resource_size(&res));
		if (!ltq_icu_membase[i])
			panic("Failed to remap icu memory");
	}

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_irq_count(eiu_node);

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_irq_to_resource_table(eiu_node,
						ltq_eiu_irq, exin_avail);
		if (ret != exin_avail)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
							res.name))
			pr_err("Failed to request eiu memory\n");

		ltq_eiu_membase = ioremap_nocache(res.start,
							resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}

	/* turn off all irqs by default */
	for (i = 0; i < MAX_IM; i++) {
		/* make sure all irqs are turned off by default */
		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
		/* clear all possibly pending interrupts */
		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		setup_irq(i + 2, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	ltq_domain = irq_domain_add_linear(node,
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

#if defined(CONFIG_MIPS_MT_SMP)
	if (cpu_has_vint) {
		pr_info("Setting up IPI vectored interrupts\n");
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
	}
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
		&irq_resched);
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif

#ifndef CONFIG_MIPS_MT_SMP
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

	/* tell oprofile which irq to use */
	cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/*
	 * if the timer irq is not one of the mips irqs we need to
	 * create a mapping
	 */
	if (MIPS_CPU_TIMER_IRQ != 7)
		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);

	return 0;
}

unsigned int get_c0_compare_int(void)
{
	return MIPS_CPU_TIMER_IRQ;
}

static struct of_device_id __initdata of_irq_ids[] = {
	{ .compatible = "lantiq,icu", .data = icu_of_init },
	{},
};

void __init arch_init_irq(void)
{
	of_irq_init(of_irq_ids);
}