/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

static inline unsigned int gic_read(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
	__raw_writel(val, gic_base + reg);
}

static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
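/*
 * The 64-bit count is read as two 32-bit halves; the high word is read
 * again afterwards and the sequence retried if it changed, so a carry
 * from the low word between the two reads cannot be missed.
 */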
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));
}

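/*
 * Write another VPE's compare registers through the "other VPE" alias
 * selected by GIC_VPE_OTHER_ADDR. Interrupts are disabled so nothing on
 * this CPU can redirect the alias mid-sequence.
 */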
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

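/*
 * GIC_VPE_CTL reports which local interrupt sources this VPE allows the
 * GIC to route; sources not listed there are delivered through the
 * core's own interrupt lines instead.
 */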
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

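/*
 * In EIC mode, select which register shadow set a vector uses; hooked
 * up as board_bind_eic_interrupt from gic_basic_init().
 */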
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	/*
	 * Some cores claim the FDC is routable but it doesn't actually seem to
	 * be connected.
	 */
	switch (current_cpu_type()) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
		return -1;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

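/*
 * Dispatch every shared interrupt that is pending, unmasked and routed
 * to this CPU. When invoked from the chained handler the parent desc is
 * already being serviced, so generic_handle_irq() is used rather than
 * do_IRQ().
 */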
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

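/*
 * Program polarity, trigger and dual-edge mode for a shared interrupt,
 * and switch between the edge and level irq_chips so the flow handler
 * matches the configured trigger type.
 */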
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
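/*
 * A shared interrupt can only be routed to a single VPE, so affinity is
 * collapsed to the first online CPU in the requested mask and the
 * per-CPU pending masks are updated to match.
 */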
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, cpumask_first(&tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

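/*
 * Reserve the top 2 * nr_cpu_ids shared interrupts as IPIs: one "call"
 * and one "resched" interrupt per CPU, each routed only to its target
 * CPU.
 */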
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

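/*
 * Bring the GIC to a known state: every shared interrupt active-high,
 * level-triggered and masked, and every routable local interrupt masked
 * on each VPE.
 */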
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

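/*
 * Map a local (per-VPE) interrupt. Each VPE has its own copy of the
 * local map registers, so the same routing is programmed on every VPE
 * through the "other VPE" register alias.
 */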
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}

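/*
 * Device tree interrupt specifiers use three cells: GIC_SHARED or
 * GIC_LOCAL, the interrupt number within that class, and the usual
 * IRQ_TYPE_* flags. A hypothetical consumer node (interrupt number
 * chosen only for illustration) would look like:
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 */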
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
	 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);