irq-mips-gic.c 22.0 KB
Newer Older
1 2 3 4 5 6 7 8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
9
#include <linux/bitmap.h>
10
#include <linux/clocksource.h>
11
#include <linux/init.h>
12
#include <linux/interrupt.h>
13
#include <linux/irq.h>
14
#include <linux/irqchip/mips-gic.h>
15
#include <linux/of_address.h>
16
#include <linux/sched.h>
17
#include <linux/smp.h>
18

19
#include <asm/mips-cm.h>
S
Steven J. Hill 已提交
20 21
#include <asm/setup.h>
#include <asm/traps.h>
22

23 24 25 26
#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

27
unsigned int gic_present;
S
Steven J. Hill 已提交
28

29
struct gic_pcpu_mask {
30
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
31 32
};

33
static void __iomem *gic_base;
34
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
35
static DEFINE_SPINLOCK(gic_lock);
36
static struct irq_domain *gic_irq_domain;
37
static int gic_shared_intrs;
38
static int gic_vpes;
39
static unsigned int gic_cpu_pin;
40
static unsigned int timer_cpu_pin;
41
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
42

43 44
static void __gic_irq_dispatch(void);

45
static inline u32 gic_read32(unsigned int reg)
46 47 48 49
{
	return __raw_readl(gic_base + reg);
}

50
static inline u64 gic_read64(unsigned int reg)
51
{
52
	return __raw_readq(gic_base + reg);
53 54
}

55
static inline unsigned long gic_read(unsigned int reg)
56
{
57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

/* Write @val to the 32-bit GIC register at offset @reg. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	void __iomem *addr = gic_base + reg;

	__raw_writel(val, addr);
}

/* Write @val to the 64-bit GIC register at offset @reg. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	void __iomem *addr = gic_base + reg;

	__raw_writeq(val, addr);
}

/* Native-width GIC register write: 64-bit when the CM is 64-bit. */
static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (mips_cm_is64)
		gic_write64(reg, (u64)val);
	else
		gic_write32(reg, (u32)val);
}

/* Read-modify-write: replace the bits selected by @mask with @val. */
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long cur = gic_read(reg);

	cur = (cur & ~mask) | val;
	gic_write(reg, cur);
}

/* Disable (mask) shared interrupt @intr via the reset-mask register bank. */
static inline void gic_reset_mask(unsigned int intr)
{
	unsigned long bit = 1ul << GIC_INTR_BIT(intr);

	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr), bit);
}

/* Enable (unmask) shared interrupt @intr via the set-mask register bank. */
static inline void gic_set_mask(unsigned int intr)
{
	unsigned long bit = 1ul << GIC_INTR_BIT(intr);

	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr), bit);
}

/* Program the polarity (active high/low) bit for shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	unsigned long bit = 1ul << GIC_INTR_BIT(intr);

	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), bit,
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

/* Program the trigger (edge/level) bit for shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	unsigned long bit = 1ul << GIC_INTR_BIT(intr);

	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), bit,
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

/* Enable/disable dual-edge detection for shared interrupt @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	unsigned long bit = 1ul << GIC_INTR_BIT(intr);

	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			bit,
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
127 128
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
129 130 131 132 133 134 135 136 137
}

/* Route shared interrupt @intr to the given @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	unsigned long bit = GIC_SH_MAP_TO_VPE_REG_BIT(vpe);

	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe), bit);
}

138
#ifdef CONFIG_CLKSRC_MIPS_GIC
139 140 141 142 143
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
144 145 146
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
147 148 149 150
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
151

152 153 154 155
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

156
	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
157 158 159 160 161 162
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

163 164
/* Program the local (current VPE) compare register with the 64-bit @cnt. */
void gic_write_compare(cycle_t cnt)
{
	u32 hi = (u32)(cnt >> 32);
	u32 lo = (u32)(cnt & 0xffffffff);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
}

171 172 173 174 175 176
/*
 * Program another CPU's compare register via the VPE-other alias.
 * Interrupts are disabled so the OTHER_ADDR selection cannot be
 * clobbered between the select and the two writes.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;
	u32 hi = (u32)(cnt >> 32);
	u32 lo = (u32)(cnt & 0xffffffff);

	local_irq_save(flags);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), hi);
	gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), lo);

	local_irq_restore(flags);
}

186 187 188 189
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

190 191
	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
192 193 194

	return (((cycle_t) hi) << 32) + lo;
}
195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215

/* Start the GIC counter by clearing the COUNTSTOP bit. */
void gic_start_count(void)
{
	u32 cfg = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));

	cfg &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), cfg);
}

/* Stop the GIC counter by setting the COUNTSTOP bit. */
void gic_stop_count(void)
{
	u32 cfg = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));

	cfg |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), cfg);
}

216 217
#endif

218 219 220 221 222 223 224 225
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

226
	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

242
static void gic_bind_eic_interrupt(int irq, int set)
S
Steven J. Hill 已提交
243 244 245 246 247
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
248 249
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
S
Steven J. Hill 已提交
250 251
}

252 253
void gic_send_ipi(unsigned int intr)
{
254
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
255 256
}

257 258 259 260 261 262 263 264 265 266 267
/* Return the virq to use for the CP0 compare (timer) interrupt. */
int gic_get_c0_compare_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));

	/* Not routable through the GIC: fall back to the CP0 vector. */
	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

/* Return the virq for the performance counter interrupt, or -1 if none. */
int gic_get_c0_perfcount_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));

	/* Is the performance counter shared with the timer? */
	if (cp0_perfcount_irq < 0)
		return -1;

	return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
}

277 278 279 280 281 282 283 284 285 286 287 288 289
/* Return the virq for the Fast Debug Channel interrupt, or -1 if none. */
int gic_get_c0_fdc_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_FDC))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));

	/* Is the FDC IRQ even present? */
	if (cp0_fdc_irq < 0)
		return -1;

	return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}

290
/*
 * Dispatch all pending shared (external) interrupts routed to this CPU.
 *
 * Reads the pending and mask register banks, ANDs them with this CPU's
 * routing bitmap, then hands each remaining bit to its Linux virq.
 *
 * @chained: true when called from a chained flow handler (dispatch with
 *	     generic_handle_irq), false when called from the low-level
 *	     vectored entry (dispatch with do_IRQ).
 */
static void gic_handle_shared_int(bool chained)
{
	/* Pending/mask registers are spaced 8 bytes apart on a 64-bit CM. */
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Walk the register banks one word at a time. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Keep only interrupts that are unmasked AND routed to this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}

329
static void gic_mask_irq(struct irq_data *d)
330
{
331
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
332 333
}

334
static void gic_unmask_irq(struct irq_data *d)
335
{
336
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
337 338
}

339 340
static void gic_ack_irq(struct irq_data *d)
{
341
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
342

343
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
344 345
}

346 347
/*
 * irq_chip .irq_set_type for shared interrupts.
 *
 * Programs the polarity, trigger and dual-edge registers to match the
 * requested sense, then switches the descriptor between the edge and
 * level chip/flow handlers under gic_lock so the register state and
 * the handler always agree.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		/* Level/high is also the fallback for unspecified senses. */
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Pick the matching chip + flow handler for the new trigger mode. */
	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity for shared interrupts.
 *
 * The GIC routes each shared interrupt to exactly one VPE, so only the
 * first online CPU of the requested mask is used. Returns
 * IRQ_SET_MASK_OK_NOCOPY since d->affinity is updated here.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	unsigned int	cpu;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	cpu = cpumask_first(&tmp);

	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, cpu);

	/* Update the pcpu_masks so dispatch only considers the new CPU. */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

432 433 434 435 436 437 438 439 440 441 442
/* irq_chip used for level-triggered shared GIC interrupts. */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/*
 * irq_chip used for edge-triggered shared GIC interrupts; differs from
 * the level chip only in the .irq_ack needed to clear the edge latch.
 */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

453
/*
 * Dispatch all pending per-VPE local interrupts.
 *
 * Pending and mask state fit in single 32-bit registers, so plain
 * unsigned longs are used as one-word bitmaps here (assumes
 * GIC_NUM_LOCAL_INTRS <= BITS_PER_LONG — holds for the 7 defined
 * local sources).
 *
 * @chained: same dispatch-path selection as gic_handle_shared_int().
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	/* Ignore sources that are currently masked. */
	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

482
	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
483 484 485 486 487 488
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

489
	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505
}

/* irq_chip for local interrupts managed per-VPE (percpu_devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
506
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
507
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
508 509 510 511 512 513 514 515 516 517 518 519
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
520
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
521
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
522 523 524 525 526 527 528 529 530 531
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* irq_chip for local interrupts masked/unmasked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

532
/*
 * Low-level dispatch entry (installed as the EIC vectored handler):
 * service local interrupts first, then shared ones.
 */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
537

538 539
/* Chained handler hung off the CPU interrupt line the GIC is routed to. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

/* Translate a CPU number to the shared interrupt used for its resched IPI. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	unsigned int intr = gic_resched_int_base + cpu;

	return intr;
}
552

553 554 555 556
/* Translate a CPU number to the shared interrupt used for its call IPI. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	unsigned int intr = gic_call_int_base + cpu;

	return intr;
}
557

558 559 560 561 562 563 564 565 566
/* IPI handler: enter the scheduler's reschedule-IPI path. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
567
	generic_smp_call_function_interrupt();
568 569 570

	return IRQ_HANDLED;
}
J
Jeffrey Deans 已提交
571

572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
/* irqaction registered on each CPU's resched-IPI interrupt. */
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

/* irqaction registered on each CPU's cross-call-IPI interrupt. */
static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

/*
 * Set up one shared interrupt @intr as an IPI for @cpu: route it to
 * that CPU exclusively, mark it edge-triggered and per-cpu, and attach
 * the given irqaction.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	/* Only the target CPU should see this interrupt as pending. */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

602
/*
 * Reserve the top 2 * nr_cpu_ids shared interrupts as IPIs: the highest
 * block for resched IPIs, the block below it for cross-call IPIs, one
 * interrupt per CPU.
 */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
/* No GIC-based IPIs configured: nothing to set up. */
static inline void gic_ipi_init(void)
{
}
#endif

621
/*
 * Put the GIC into a sane default state: all shared interrupts
 * level/active-high and masked, and all routable local interrupts
 * masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	/* Mask every routable local interrupt on each VPE. */
	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

646 647
/*
 * irq_domain .map for local (per-VPE) hwirqs.
 *
 * Picks the appropriate irq_chip/flow handler for the source, then
 * programs the per-VPE MAP register for this interrupt on every VPE so
 * it is delivered on the configured CPU pin.
 *
 * Returns 0 on success, -EPERM if the source is not routable, -EINVAL
 * for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	/* Program the per-source MAP register on every VPE. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

/*
 * irq_domain .map for shared (external) hwirqs: install the level chip
 * by default (gic_set_type() swaps it later if needed), route the
 * interrupt to the configured CPU pin, and target VPE 0 until affinity
 * is set.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

743 744 745 746 747 748 749 750
/* irq_domain .map: route local vs shared hwirqs to the right helper. */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	bool is_local = GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS;

	return is_local ? gic_local_irq_domain_map(d, virq, hw)
			: gic_shared_irq_domain_map(d, virq, hw);
}

751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769
/*
 * irq_domain .xlate: decode a 3-cell DT specifier
 * (<GIC_SHARED|GIC_LOCAL> <intr> <flags>) into a hwirq + trigger type.
 */
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	switch (intspec[0]) {
	case GIC_SHARED:
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
		break;
	case GIC_LOCAL:
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

770
/* Domain ops covering both local and shared GIC interrupts. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

775 776 777 778
/*
 * Core GIC bring-up: map the register space, probe the shared interrupt
 * and VPE counts from GIC_SH_CONFIG, hook the dispatch entry point
 * (vectored in EIC mode, chained otherwise), create the irq_domain and
 * apply default register state.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	/* NUMINTRS encodes shared interrupts in units of 8, minus one. */
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* NUMVPES is stored minus one. */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}
838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893

/* Legacy (non-DT) entry point: initialize the GIC without a DT node. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

/*
 * Device-tree probe: pick the first CPU vector not listed in
 * "mti,reserved-cpu-vectors", locate the register space (from DT or the
 * CM GCRs), enable the GIC in the CM, and run the core init.
 *
 * Returns 0 on success, -ENODEV if no vector or register range is
 * available.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	/* Enable the GIC at its base address via the CM GCRs. */
	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);