/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

unsigned int gic_present;
void __iomem *mips_gic_base;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static unsigned long __gic_base_addr;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

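/*
 * Accessors for the GIC register block. The Coherence Manager determines
 * the register width: gic_read()/gic_write() use 64-bit accesses when
 * mips_cm_is64 is set and 32-bit accesses otherwise.
 */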
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(mips_gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(mips_gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, mips_gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, mips_gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

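/*
 * Routing helpers for shared interrupts: map interrupt @intr to a CPU
 * interrupt @pin, or to a target @vpe, via the shared-section map registers.
 */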
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

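/*
 * Report whether a local interrupt may be routed through the GIC, based on
 * the routability bits in this VP(E)'s control register; in EIC mode every
 * local interrupt is routable.
 */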
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

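/*
 * Send an IPI by raising the edge-triggered shared interrupt backing it via
 * the WEDGE register; routing to @cpu was set up when the IPI was allocated.
 */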
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

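/*
 * Helpers for arch code to obtain the virq of the CP0 timer, performance
 * counter and fast debug channel interrupts, falling back to the plain CPU
 * IRQ lines when the GIC cannot route them.
 */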
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}

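/*
 * Dispatch every shared interrupt that is pending, unmasked and routed to
 * the current CPU (pending & intrmask & pcpu_mask). When invoked as a
 * chained handler the parent has already done the CPU-level accounting, so
 * use generic_handle_irq() rather than do_IRQ().
 */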
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	if (mips_cm_is64) {
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
		__ioread64_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	} else {
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
		__ioread32_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

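/*
 * irq_chip callbacks for shared interrupts: mask/unmask via the reset-mask
 * and set-mask registers, ack (for edge interrupts) by clearing the WEDGE
 * bit.
 */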
static void gic_mask_irq(struct irq_data *d)
{
	write_gic_rmask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
}

static void gic_unmask_irq(struct irq_data *d)
{
	write_gic_smask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

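/*
 * Program polarity, trigger and dual-edge configuration for a shared
 * interrupt to match the requested IRQ type, and switch the flow handler
 * between the edge and level chip variants accordingly.
 */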
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

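/*
 * Two irq_chip flavours for shared interrupts: level-triggered, and
 * edge-triggered, which additionally needs an ack and can transmit IPIs.
 */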
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

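/*
 * Dispatch pending, unmasked interrupts local to the current VP(E),
 * mirroring gic_handle_shared_int() above.
 */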
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

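/*
 * Variants of the local mask/unmask operations that act on every VP(E) via
 * the VPE-other alias region. These back the timer, performance counter and
 * FDC interrupts, which the rest of the MIPS kernel treats as ordinary
 * (non-percpu) IRQs shared by all CPUs.
 */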
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

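/*
 * Interrupt entry points: __gic_irq_dispatch() is installed as the vectored
 * handler in EIC mode, while gic_irq_dispatch() is the chained handler used
 * when the GIC feeds one of the CPU interrupt pins.
 */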
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(BIT(i));
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int i;
	unsigned long flags;
	u32 val;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

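/*
 * Route a shared interrupt to the configured CPU pin and to @vpe, and
 * update the per-CPU masks so gic_handle_shared_int() dispatches it there.
 */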
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

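/*
 * Allocate consecutive shared hwirqs from the reserved IPI range, one per
 * CPU in the mask passed via @arg, mapping each as a rising-edge interrupt
 * routed to the corresponding VP(E).
 */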
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
		break;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

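/*
 * Core initialisation: probe the number of shared interrupts and VP(E)s
 * from the GIC config register, choose CPU pins for normal and timer
 * interrupts, create the IRQ and IPI domains, and reserve IPI vectors.
 */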
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	mips_gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

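/*
 * Device-tree probe path: pick the first CPU vector not listed in
 * "mti,reserved-cpu-vectors", locate the GIC either from the "reg" property
 * or from the CM GCRs, enable the GIC region, then hand off to __gic_init().
 */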
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);