/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

/* Upper bound on shared (external) interrupts supported by this driver. */
#define GIC_MAX_INTRS		256

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

/* Virtual base address of the ioremap()ed GIC register region. */
void __iomem *mips_gic_base;

/* Bitmap of shared interrupts routed to a given CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
/* Serialises read-modify-write access to GIC routing/mask state. */
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
/* Probed from GIC_CONFIG at init time. */
static int gic_shared_intrs;
static int gic_vpes;
/* CPU interrupt pins used for normal interrupts and the GIC timer. */
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
/* Shared interrupts reserved for IPI use / currently free for IPIs. */
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

59 60 61 62 63 64 65 66
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

67
	vpe_ctl = read_gic_vl_ctl();
68 69
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
70
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
71
	case GIC_LOCAL_INT_PERFCTR:
72
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
73
	case GIC_LOCAL_INT_FDC:
74
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
75 76
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
77
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
78 79 80 81 82
	default:
		return true;
	}
}

83
static void gic_bind_eic_interrupt(int irq, int set)
S
Steven J. Hill 已提交
84 85 86 87 88
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
89
	write_gic_vl_eic_shadow_set(irq, set);
S
Steven J. Hill 已提交
90 91
}

92
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
93
{
94 95
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

96
	write_gic_wedge(GIC_WEDGE_RW | hwirq);
97 98
}

99 100 101 102 103 104 105 106 107 108 109
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

/*
 * Return the virq for the performance counter overflow interrupt, or -1
 * when it is neither routable via the GIC nor wired to its own CPU IRQ
 * (i.e. it shares the timer interrupt).
 */
int gic_get_c0_perfcount_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));

	/* Is the performance counter shared with the timer? */
	if (cp0_perfcount_irq < 0)
		return -1;

	return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
}


119 120 121 122 123 124 125 126 127 128 129 130 131
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

/*
 * Dispatch every pending shared interrupt that is unmasked and routed
 * to the current CPU.
 *
 * @chained: true when invoked from a chained flow handler; false when
 *	     called from the low-level vector, which needs do_IRQ()'s
 *	     irq_enter()/irq_exit() accounting.
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	/*
	 * Snapshot the pending and mask registers, using the widest
	 * access the CM supports so each register is read atomically.
	 */
	if (mips_cm_is64) {
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
		__ioread64_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	} else {
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
		__ioread32_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
	}

	/* Keep only interrupts that are unmasked and target this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}


167
static void gic_mask_irq(struct irq_data *d)
168
{
169
	write_gic_rmask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
170 171
}

172
static void gic_unmask_irq(struct irq_data *d)
173
{
174
	write_gic_smask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
175 176
}

177 178
static void gic_ack_irq(struct irq_data *d)
{
179
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
180

181
	write_gic_wedge(irq);
182 183
}

/*
 * Configure polarity/trigger/dual-edge for a shared interrupt and
 * switch the virq to the matching flow handler (edge needs ack-based
 * flow, level uses mask-based flow). Unsupported sense values fall
 * through to active-high level. Always returns 0.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	/* Lock held across the register read-modify-writes below. */
	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	/* Swap chip/handler to match the new trigger mode. */
	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}


#ifdef CONFIG_SMP
/*
 * Re-route a shared interrupt to the first online CPU in @cpumask and
 * update the per-CPU dispatch masks so only that CPU handles it.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t online = CPU_MASK_NONE;
	unsigned long flags;
	unsigned int cpu;
	int i;

	/* Only online CPUs are valid targets. */
	cpumask_and(&online, cpumask, cpu_online_mask);
	if (cpumask_empty(&online))
		return -EINVAL;

	/* Assumption: the mask refers to a single CPU, so take the first. */
	cpu = cpumask_first(&online);

	spin_lock_irqsave(&gic_lock, flags);

	/* Point the interrupt at the chosen VP. */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Make the chosen CPU the sole handler in the pcpu masks. */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);

	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif


/* Chip for level-triggered shared interrupts: mask-based flow, no ack. */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};


/*
 * Chip for edge-triggered shared interrupts: needs irq_ack to clear the
 * latched edge, and also serves as the chip for GIC-backed IPIs.
 */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};


/*
 * Dispatch pending, unmasked local interrupts on the current VP(E).
 *
 * @chained: true when called from a chained flow handler; false when
 *	     called from the low-level vector (needs do_IRQ() accounting).
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	/* Only handle interrupts that are both pending and unmasked. */
	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}


/* Mask a local interrupt on the current VP(E) only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	write_gic_vl_rmask(BIT(GIC_HWIRQ_TO_LOCAL(d->hwirq)));
}


/* Unmask a local interrupt on the current VP(E) only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	write_gic_vl_smask(BIT(GIC_HWIRQ_TO_LOCAL(d->hwirq)));
}


/* Chip for local interrupts masked/unmasked on the current VP(E) only. */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};


/*
 * Mask a local interrupt on every VP(E), addressing each one through
 * the shared "other VP" register window. gic_lock serialises use of
 * that window.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}


/*
 * Unmask a local interrupt on every VP(E) via the "other VP" register
 * window, serialised by gic_lock.
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}


/* Chip for local interrupts that must be masked/unmasked on all VP(E)s. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};


/*
 * Top-level dispatch called directly from the CPU vector (EIC mode):
 * local interrupts are handled before shared ones.
 */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

/*
 * Chained-handler dispatch used when the GIC output is wired to a
 * parent CPU interrupt line.
 */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}


/*
 * Route a local interrupt to the appropriate CPU pin on every VP(E).
 *
 * Returns -EPERM when the interrupt is not routable on this hardware,
 * -EINVAL for local hwirqs beyond GIC_LOCAL_INT_FDC, 0 on success.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int i;
	unsigned long flags;
	u32 val;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	/* Program each VP(E)'s map register via the other-VP window. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}


/*
 * Route a shared interrupt to @vpe: select the CPU pin, point the
 * interrupt at that VP, and make it the only CPU that dispatches the
 * interrupt in the per-CPU masks. Always returns 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(vpe)));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}


426
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

/*
 * .map callback for the main GIC domain. Shared hwirqs get the level
 * chip by default (gic_set_type may later switch them to the edge
 * chip) and are initially routed to CPU 0; local hwirqs get a per-cpu
 * chip and flow handler.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		/* Route to CPU 0 until an affinity is set. */
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}


497 498 499 500 501 502 503 504 505 506 507 508 509 510
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

/*
 * Nothing to undo on free: mapping only programs hardware routing, and
 * the core tears down the hwirq/chip association itself.
 * NOTE(review): not static — presumably historical; verify no other
 * file references it before changing linkage.
 */
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}


/* Operations for the main GIC domain (local + shared interrupts). */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};


/*
 * IPI hwirqs are allocated dynamically at alloc time, so a devicetree
 * specifier carries no information to translate; IPIs always fire as
 * rising-edge interrupts.
 */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	*out_type = IRQ_TYPE_EDGE_RISING;
	*out_hwirq = 0;

	return 0;
}


static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
542 543
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;
544

545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565
	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;
566

567
		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
568 569 570 571 572 573 574 575
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
576 577 578 579 580 581

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
582 583 584 585
	}

	return 0;
error:
586
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
587 588 589 590 591 592
	return ret;
}

/*
 * Return the block of shared interrupts backing a freed IPI group to
 * the available pool. Relies on the alloc path mapping virq..virq+n-1
 * to consecutive hwirqs starting at the first virq's hwirq.
 * NOTE(review): not static — presumably historical; verify before
 * changing linkage.
 */
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}


/*
 * Match callback for the IPI domain: only DOMAIN_BUS_IPI requests can
 * match, and then only when the domain really is an IPI domain and the
 * requested node (if any) is ours.
 */
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	if (bus_token != DOMAIN_BUS_IPI)
		return 0;

	return (d->bus_token == bus_token) &&
	       (!node || to_of_node(d->fwnode) == node);
}


/* Operations for the IPI child domain. */
static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};


/*
 * Probe and initialise the GIC from its devicetree node: pick a CPU
 * vector, locate and map the register region, size the hardware, set
 * up the dispatch path (EIC or chained), create the IRQ and IPI
 * domains, reserve IPI vectors and program safe defaults (everything
 * masked, shared interrupts level/active-high).
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, j, reserved, gicconfig, cpu, v[2];
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = reserved = 0;
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	/* Decode interrupt and VP counts from the GIC config register. */
	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
	gic_vpes >>= __fls(GIC_CONFIG_PVPS);
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			write_gic_vl_other(mips_cm_vp_id(cpu));
			write_gic_vo_ctl(GIC_VX_CTL_EIC);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	/* Reserve IPI vectors: from DT if given, else the top of the range. */
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(BIT(i));
	}

	/* Mask all routable local interrupts on every VP(E). */
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			write_gic_vo_rmask(BIT(j));
		}
	}

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);