/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d);

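/* external lines 0-3 are configured in cfg_reg1; chips with more
 * lines (the 6368 has six) keep lines 4-5 in cfg_reg2 */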
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

static inline void handle_internal(int intbit)
{
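	/* cascaded external lines arrive through the internal
	 * controller; re-route them to their external IRQ numbers */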
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

/*
 * dispatch internal device IRQs (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relative to another. the static counter
 * resumes the loop where it ended the last time we left this
 * function.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
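	/* (on 64-bit chips the second status word holds IRQs 0-31,	\
	 * so pending[] is filled highest word first; the XOR on the	\
	 * register index in mask/unmask undoes the same swap) */	\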
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
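		/* width is a power of two, so the AND wraps the	\
		 * round-robin counter without a division */		\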
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
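		/* keep the line masked on CPUs that are present	\
		 * but not online */					\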
		if (cpu_online(cpu))					\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
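
/*
 * the expansions above provide __dispatch_internal_{32,64}() and
 * __internal_irq_{mask,unmask}_{32,64}(); bcm63xx_init_irq() selects
 * the set matching the chip's register width.
 */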

asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
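		/* IP7 is usually the CPU timer, IP0/IP1 are the MIPS
		 * software interrupts; IP2 and up carry the cascaded
		 * peripheral and external lines */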
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}

/*
 * internal IRQ operations: only mask/unmask on PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d);
}

/*
 * external IRQ operations: mask/unmask and clear on PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}

static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
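	/* levelsense picks level vs edge triggering, sense picks
	 * high/rising vs low/falling, bothedge latches both edges */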
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
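	/* each config register packs four lines; reduce to the
	 * index within the register */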
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};

static void bcm63xx_init_irq(void)
{
	int irq_bits;

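	/* index 0 is CPU 0's stat/mask register set, index 1 is CPU
	 * 1's; chips with a single set zero the second entry below so
	 * the mask/unmask loops stop early */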
	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

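	/* hook up the register-width specific helpers generated by
	 * BUILD_IPIC_INTERNAL() above */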
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

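	/* without the cascade, external lines come in directly on
	 * MIPS IRQs 3..6 (see plat_irq_dispatch) */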
	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

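	/* the internal controller always cascades off IP2; on SMP
	 * chips the second CPU's controller cascades off IP3 */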
	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded)
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
#endif
}