/*
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

#define ltq_icu_w32(m, x, y)	ltq_w32((x), ltq_icu_membase[m] + (y))
#define ltq_icu_r32(m, x)	ltq_r32(ltq_icu_membase[m] + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* our 2 ipi interrupts for VSMP */
#define MIPS_CPU_IPI_RESCHED_IRQ	0
#define MIPS_CPU_IPI_CALL_IRQ		1

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

#ifdef CONFIG_MIPS_MT_SMP
int gic_present;
#endif

static int exin_avail;
static struct resource ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static int ltq_perfcount_irq;

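/* return the irq line behind external interrupt index exin, or -1 if out of range */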
int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin].start;
	return -1;
}

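/*
 * hwirqs above the CPU irq cascade are grouped per ICU module; the
 * handlers below derive the module index and bit offset from d->hwirq
 */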
void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
	ltq_icu_w32(im, BIT(offset), isr);
}

static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, BIT(offset), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}

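/*
 * each EXIN line has a 4 bit trigger configuration field in EXIN_C;
 * edge triggered modes additionally switch to the edge flow handler
 */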
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;

	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(val << (i * 4)), LTQ_EIU_EXIN_C);
		}
	}

	return 0;
}

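/*
 * on startup an EIU line defaults to low level triggering; pending
 * state is cleared before the line is unmasked in EXIN_INEN
 */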
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

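/* on shutdown the line is masked in the EIU in addition to the ICU */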
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->hwirq == ltq_eiu_irq[i].start) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

static struct irq_chip ltq_irq_type = {
	"icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static struct irq_chip ltq_eiu_type = {
	"eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
};

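/* dispatch the highest pending interrupt of a single ICU module */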
static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

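/* generate one dispatch stub per ICU module for the vectored interrupt setup */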
#define DEFINE_HWx_IRQDISPATCH(x)					\
	static void ltq_hw ## x ## _irqdispatch(void)			\
	{								\
		ltq_hw_irqdispatch(x);					\
	}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

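/*
 * when the CPU timer sits on IP7 it is dispatched directly, otherwise
 * the fifth module is cascaded like the others
 */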
#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif

#ifdef CONFIG_MIPS_MT_SMP
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
	setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

static void ltq_sw0_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}

static void ltq_sw1_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI_resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI_call"
};
#endif

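/* handle the CPU timer irq first, then walk the cascaded ICU modules */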
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	unsigned int i;

	if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		goto out;
	} else {
		for (i = 0; i < MAX_IM; i++) {
			if (pending & (CAUSEF_IP2 << i)) {
				ltq_hw_irqdispatch(i);
				goto out;
			}
		}
	}
	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause());

out:
	return;
}

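/*
 * hwirqs below the cascade belong to the MIPS CPU irq controller; EIU
 * lines get the eiu chip, everything else the plain icu chip
 */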
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i].start)
			chip = &ltq_eiu_type;

	irq_set_chip_and_handler(hw, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};

static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
};

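/*
 * probe the ICU from the devicetree: map its register ranges, optionally
 * the EIU, mask all interrupts and register the linear irq domain
 */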
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret;

	for (i = 0; i < MAX_IM; i++) {
		if (of_address_to_resource(node, i, &res))
			panic("Failed to get icu memory range");

		if (request_mem_region(res.start, resource_size(&res),
					res.name) < 0)
			pr_err("Failed to request icu memory");

		ltq_icu_membase[i] = ioremap_nocache(res.start,
					resource_size(&res));
		if (!ltq_icu_membase[i])
			panic("Failed to remap icu memory");
	}

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_irq_count(eiu_node);

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_irq_to_resource_table(eiu_node,
						ltq_eiu_irq, exin_avail);
		if (ret != exin_avail)
			panic("failed to load external irq resources");

		if (request_mem_region(res.start, resource_size(&res),
							res.name) < 0)
			pr_err("Failed to request eiu memory");

		ltq_eiu_membase = ioremap_nocache(res.start,
							resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}

	/* turn off all irqs by default */
	for (i = 0; i < MAX_IM; i++) {
		/* disable every interrupt source of this module */
		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
		/* and clear all possibly pending interrupts */
		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		setup_irq(i + 2, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	ltq_domain = irq_domain_add_linear(node,
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

#if defined(CONFIG_MIPS_MT_SMP)
	if (cpu_has_vint) {
		pr_info("Setting up IPI vectored interrupts\n");
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
	}
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
		&irq_resched);
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif

#ifndef CONFIG_MIPS_MT_SMP
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

	/* tell oprofile which irq to use */
	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/*
	 * if the timer irq is not one of the mips irqs we need to
	 * create a mapping
	 */
	if (MIPS_CPU_TIMER_IRQ != 7)
		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);

	return 0;
}

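/* hooks used by the generic MIPS code to find the perf counter and timer irqs */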
int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}

unsigned int get_c0_compare_int(void)
{
	return MIPS_CPU_TIMER_IRQ;
}

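/* the ICU is the only interrupt controller we look for in the devicetree */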
static struct of_device_id __initdata of_irq_ids[] = {
	{ .compatible = "lantiq,icu", .data = icu_of_init },
	{},
};

void __init arch_init_irq(void)
{
	of_irq_init(of_irq_ids);
}