/*
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions */
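/*
 * each of the five interrupt modules (IM0..IM4) exposes the same
 * ISR/IER/IOSR/IRSR/IMR block; LTQ_ICU_OFFSET is the stride between
 * two consecutive modules
 */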
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

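/* EIU registers: EXIN_C holds the per-line trigger configuration,
 * EXIN_INIC the pending bits and EXIN_INEN the per-line enable bits
 */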
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INEN	0x000C

/* irq numbers used by the external interrupt unit (EIU) */
#define LTQ_EIU_IR0		(INT_NUM_IM4_IRL0 + 30)
#define LTQ_EIU_IR1		(INT_NUM_IM3_IRL0 + 31)
#define LTQ_EIU_IR2		(INT_NUM_IM1_IRL0 + 26)
#define LTQ_EIU_IR3		INT_NUM_IM1_IRL0
#define LTQ_EIU_IR4		(INT_NUM_IM1_IRL0 + 1)
#define LTQ_EIU_IR5		(INT_NUM_IM1_IRL0 + 2)
#define LTQ_EIU_IR6		(INT_NUM_IM2_IRL0 + 30)

#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/* irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

#define ltq_icu_w32(x, y)	ltq_w32((x), ltq_icu_membase + (y))
#define ltq_icu_r32(x)		ltq_r32(ltq_icu_membase + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

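/* lookup table mapping an EIU line to the ICU irq it is wired to */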
static unsigned short ltq_eiu_irq[MAX_EIU] = {
	LTQ_EIU_IR0,
	LTQ_EIU_IR1,
	LTQ_EIU_IR2,
	LTQ_EIU_IR3,
	LTQ_EIU_IR4,
	LTQ_EIU_IR5,
};

static struct resource ltq_icu_resource = {
	.name	= "icu",
	.start	= LTQ_ICU_BASE_ADDR,
	.end	= LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

static struct resource ltq_eiu_resource = {
	.name	= "eiu",
	.start	= LTQ_EIU_BASE_ADDR,
	.end	= LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

static void __iomem *ltq_icu_membase;
static void __iomem *ltq_eiu_membase;

void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

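	/* step to the IER of the module this irq belongs to, then use the
	 * remainder as the bit position inside that module
	 */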
	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
	ltq_icu_w32((1 << irq_nr), isr);
}

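/* acking writes the source's bit back to the ISR, which the code treats
 * as write-one-to-clear
 */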
static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32((1 << irq_nr), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int irq_nr = d->irq - INT_NUM_IRQ0;

	ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
	irq_nr %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
}

static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->irq == ltq_eiu_irq[i]) {
			/* low level - we should really handle set_type */
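			/* each EIU line owns a 4-bit field in EXIN_C; 0x6
			 * selects the low-level trigger mentioned above
			 */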
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
				LTQ_EIU_EXIN_INIC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < MAX_EIU; i++) {
		if (d->irq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

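	/* fetch the pending interrupt bits of this module */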
	irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
	if (irq == 0)
		return;

	/* a silicon bug causes only the most significant set bit to be
	 * valid; all other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

#define DEFINE_HWx_IRQDISPATCH(x)					\
	static void ltq_hw ## x ## _irqdispatch(void)			\
	{								\
		ltq_hw_irqdispatch(x);					\
	}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	unsigned int i;

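	/* IP7 is the CPU timer; IP2..IP6 cascade from ICU modules 0..4 */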
	if (pending & CAUSEF_IP7) {
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		goto out;
	} else {
		for (i = 0; i < 5; i++) {
			if (pending & (CAUSEF_IP2 << i)) {
				ltq_hw_irqdispatch(i);
				goto out;
			}
		}
	}
	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_cause());

out:
	return;
}

static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
};

void __init arch_init_irq(void)
{
	int i;

	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
		panic("Failed to insert icu memory");

	if (request_mem_region(ltq_icu_resource.start,
			resource_size(&ltq_icu_resource), "icu") < 0)
		panic("Failed to request icu memory");

	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource));
	if (!ltq_icu_membase)
		panic("Failed to remap icu memory");

	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
		panic("Failed to insert eiu memory");

	if (request_mem_region(ltq_eiu_resource.start,
			resource_size(&ltq_eiu_resource), "eiu") < 0)
		panic("Failed to request eiu memory");

	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource));
	if (!ltq_eiu_membase)
		panic("Failed to remap eiu memory");

	/* make sure all irqs are turned off by default */
	for (i = 0; i < 5; i++) {
		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
		/* clear all possibly pending interrupts */
		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
	}

	mips_cpu_irq_init();

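	/* the five ICU modules cascade into MIPS CPU irq lines 2..6 */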
	for (i = 2; i <= 6; i++)
		setup_irq(i, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

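	/* external interrupt lines get the eiu chip so that startup/shutdown
	 * also program the EIU; all other irqs use the plain icu chip
	 */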
	for (i = INT_NUM_IRQ0;
		i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
			(i == LTQ_EIU_IR2))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		/* EIU3-5 only exist on ar9 and vr9 */
		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
			(i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		else
			irq_set_chip_and_handler(i, &ltq_irq_type,
				handle_level_irq);

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

	/* tell oprofile which irq to use */
	cp0_perfcount_irq = LTQ_PERF_IRQ;
}

unsigned int __cpuinit get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}