irq-mips-gic.c 22.3 KB
Newer Older
1 2 3 4 5 6 7 8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
9
#include <linux/bitmap.h>
10
#include <linux/clocksource.h>
11
#include <linux/init.h>
12
#include <linux/interrupt.h>
13
#include <linux/irq.h>
14
#include <linux/irqchip.h>
15
#include <linux/irqchip/mips-gic.h>
16
#include <linux/of_address.h>
17
#include <linux/sched.h>
18
#include <linux/smp.h>
19

20
#include <asm/mips-cm.h>
S
Steven J. Hill 已提交
21 22
#include <asm/setup.h>
#include <asm/traps.h>
23

24 25
#include <dt-bindings/interrupt-controller/mips-gic.h>

26
unsigned int gic_present;

/* Bitmap of shared interrupts currently routed to one CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;			/* mapped GIC register region */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);		/* serialises routing/mask register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;			/* number of shared (external) interrupts */
static int gic_vpes;				/* number of VPEs served by the GIC */
static unsigned int gic_cpu_pin;		/* CPU pin shared interrupts are routed to */
static unsigned int timer_cpu_pin;		/* CPU pin used by the GIC local timer */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

44
/* Raw 32-bit read of the GIC register at byte offset @reg. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

/* Raw 64-bit read of the GIC register at byte offset @reg. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

/* Natural-width read: 64-bit when the CM is 64-bit, else 32-bit. */
static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

/* Raw 32-bit write of @val to the GIC register at byte offset @reg. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

/* Raw 64-bit write of @val to the GIC register at byte offset @reg. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

/* Natural-width write: 64-bit when the CM is 64-bit, else 32-bit. */
static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

/* Read-modify-write: clear @mask bits in @reg and set @val bits. */
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Set the polarity (active high/low) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

/* Set edge vs. level triggering of shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

/* Enable/disable dual-edge (both edges) triggering of @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

/* Route shared interrupt @intr to VPE @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

137
#ifdef CONFIG_CLKSRC_MIPS_GIC
138 139 140 141
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

142 143 144
	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

145
	do {
146 147 148
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
149 150 151 152
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
153

154 155 156 157
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

158
	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
159 160 161 162 163 164
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

165 166
void gic_write_compare(cycle_t cnt)
{
167 168 169 170 171 172 173 174
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
175 176
}

177 178 179 180 181 182
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

183
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
184 185 186 187 188 189 190 191 192

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
193 194 195 196

	local_irq_restore(flags);
}

197 198 199 200
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

201 202 203
	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

204 205
	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
206 207 208

	return (((cycle_t) hi) << 32) + lo;
}
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

230 231
#endif

232 233 234 235 236 237 238 239
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

240
	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
241 242 243 244 245 246 247 248 249 250 251 252 253 254 255
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

256
/*
 * In EIC mode, bind interrupt vector @irq to register shadow set @set
 * for the current VPE.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

266 267
/* Trigger shared (edge) interrupt @intr by setting its WEDGE bit — used for IPIs. */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

271 272 273 274 275 276 277 278 279 280 281
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
282
		/* Is the performance counter shared with the timer? */
283 284 285 286 287 288 289 290
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

291 292 293 294 295 296 297 298 299 300 301 302 303
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

304
/*
 * Dispatch all pending shared interrupts routed to this CPU.
 *
 * @chained: true when invoked from a chained handler (use
 *           generic_handle_irq), false from the low-level dispatch
 *           path (use do_IRQ).
 *
 * Pending and mask registers are snapshotted into local bitmaps, ANDed
 * with each other and with this CPU's routing mask, then each set bit
 * is converted back to a Linux virq and handled.
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Registers advance one word (4 or 8 bytes) per bitmap long. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	/* Iterate set bits directly rather than clearing them one by one. */
	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

343
/* irq_chip .irq_mask: disable a shared interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

/* irq_chip .irq_unmask: enable a shared interrupt. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

/* irq_chip .irq_ack: clear the edge-detect WEDGE bit for a shared interrupt. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

360 361
/*
 * irq_chip .irq_set_type: program polarity/trigger/dual-edge for a
 * shared interrupt and switch the descriptor between the level and
 * edge chip/flow handler accordingly. Unrecognised types default to
 * active-high level. Always succeeds (returns 0).
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Swap chip + flow handler to match the programmed trigger mode. */
	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity: re-route a shared interrupt to the first
 * online CPU in @cpumask (the GIC routes each shared interrupt to a
 * single VPE) and update the per-CPU dispatch masks to match.
 *
 * Returns IRQ_SET_MASK_OK_NOCOPY (affinity mask copied here under the
 * lock) or -EINVAL when no requested CPU is online.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i, cpu;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Compute the target CPU once instead of per use. */
	cpu = cpumask_first(&tmp);

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, cpu);

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

443 444 445 446 447 448 449 450 451 452 453
/* Chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/* Chip for edge-triggered shared interrupts (WEDGE bit acked). */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

464
/*
 * Dispatch all pending, unmasked local (per-VPE) interrupts.
 *
 * @chained: true when invoked from a chained handler (use
 *           generic_handle_irq), false from the low-level dispatch
 *           path (use do_IRQ).
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	/* Iterate set bits directly rather than clearing them one by one. */
	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

/* irq_chip .irq_mask: disable a local interrupt on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* irq_chip .irq_unmask: enable a local interrupt on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

/* Chip for local interrupts managed per-CPU via the percpu IRQ API. */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

/*
 * irq_chip .irq_mask variant that disables a local interrupt on every
 * VPE, using the VPE-other alias under gic_lock.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		/* Select VPE i, then mask the interrupt there. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/*
 * irq_chip .irq_unmask variant that enables a local interrupt on every
 * VPE, using the VPE-other alias under gic_lock.
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		/* Select VPE i, then unmask the interrupt there. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/*
 * Chip for local interrupts (timer/perfctr/FDC) that the rest of the
 * MIPS code treats as ordinary IRQs rather than percpu IRQs; mask and
 * unmask therefore apply to all VPEs at once.
 */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

543
/* EIC vector entry point: dispatch local then shared interrupts. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

/* Chained-handler entry point for the CPU vector the GIC is routed to. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}
563

564 565 566 567
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}
568

569 570 571 572 573 574 575 576 577
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
578
	generic_smp_call_function_interrupt();
579 580 581

	return IRQ_HANDLED;
}
J
Jeffrey Deans 已提交
582

583 584 585 586 587 588 589 590 591 592 593 594 595 596 597
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
598 599
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
600 601
	int i;

602
	gic_map_to_vpe(intr, cpu);
603 604
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
J
Jeffrey Deans 已提交
605 606
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

607 608 609 610
	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
611 612
}

613
static __init void gic_ipi_init(void)
614
{
615 616 617
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
618
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
619 620 621 622 623 624 625 626 627 628 629 630 631
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

632
/*
 * Reset the GIC to a known state: shared interrupts default to
 * active-high level-triggered and masked; routable local interrupts
 * are masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		/* Address VPE i through the VPE-other alias. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

657 658
/*
 * Domain .map for local (per-VPE) hwirqs: pick the appropriate chip
 * and flow handler, then route the interrupt to the chosen CPU pin on
 * every VPE via the map registers.
 *
 * Returns 0 on success, -EPERM if the interrupt is not routable on
 * this hardware, or -EINVAL for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		/* Program VPE i's map register through the VPE-other alias. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

/*
 * Domain .map for shared hwirqs: install the level chip/handler by
 * default (gic_set_type may switch to edge later), route the interrupt
 * to the GIC CPU pin and to VPE 0, and mark it dispatchable on CPU 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

754 755 756 757 758 759 760 761
/*
 * Domain .map: hwirqs below GIC_NUM_LOCAL_INTRS are per-VPE local
 * interrupts; everything above is a shared interrupt.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) >= GIC_NUM_LOCAL_INTRS)
		return gic_shared_irq_domain_map(d, virq, hw);

	return gic_local_irq_domain_map(d, virq, hw);
}

762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780
/*
 * Domain .xlate: translate a 3-cell DT specifier
 * <GIC_SHARED|GIC_LOCAL, intr, type> into a hwirq and trigger type.
 * Returns -EINVAL for a wrong cell count or unknown interrupt class.
 */
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	switch (intspec[0]) {
	case GIC_SHARED:
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
		break;
	case GIC_LOCAL:
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

781
/* irq_domain operations covering both local and shared hwirq ranges. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

786 787 788 789
/*
 * Common GIC initialisation: map the register region, read the number
 * of shared interrupts and VPEs from GIC_SH_CONFIG, hook the dispatch
 * path (EIC vector or chained CPU-vector handler), create the IRQ
 * domain, reset the hardware and set up IPIs.
 *
 * @gic_base_addr:       physical base of the GIC register block
 * @gic_addrspace_size:  size of that block
 * @cpu_vec:             CPU interrupt vector to use (non-EIC mode)
 * @irqbase:             first Linux IRQ number for the simple domain
 * @node:                DT node, or NULL for legacy (non-DT) init
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	/* NUMINTRS encodes (shared interrupts / 8) - 1. */
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* NUMVPES encodes the VPE count - 1. */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}
849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904

/* Legacy (non-DT) entry point: initialise the GIC without a DT node. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

/*
 * Device-tree probe: pick the first CPU vector not listed in
 * "mti,reserved-cpu-vectors", locate the register block (from the DT
 * "reg" property or from the CM GCR base), enable the GIC in the CM,
 * and run common initialisation.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);