irq-mips-gic.c 21.0 KB
Newer Older
1 2 3 4 5 6 7 8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
9
#include <linux/bitmap.h>
10
#include <linux/clocksource.h>
11
#include <linux/init.h>
12
#include <linux/interrupt.h>
13
#include <linux/irq.h>
14
#include <linux/irqchip.h>
15
#include <linux/irqchip/mips-gic.h>
16
#include <linux/of_address.h>
17
#include <linux/sched.h>
18
#include <linux/smp.h>
19

20
#include <asm/mips-cps.h>
S
Steven J. Hill 已提交
21 22
#include <asm/setup.h>
#include <asm/traps.h>
23

24 25
#include <dt-bindings/interrupt-controller/mips-gic.h>

/* Set once a GIC has been successfully probed & initialised. */
unsigned int gic_present;
/* Virtual base address of the GIC register region (set in __gic_init). */
void __iomem *mips_gic_base;

/* Per-CPU bitmap of which shared interrupts are routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static unsigned long __gic_base_addr;	/* physical GIC base, for USM range */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises routing regs & pcpu_masks */
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;		/* number of shared interrupt vectors */
static int gic_vpes;			/* number of VP(E)s served by the GIC */
static unsigned int gic_cpu_pin;	/* CPU pin shared interrupts target */
static unsigned int timer_cpu_pin;	/* CPU pin the GIC timer interrupt uses */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
/* Shared vectors reserved for IPIs, and the subset not yet allocated. */
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

/* Read a 32-bit GIC register at byte offset @reg. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(mips_gic_base + reg);
}

/* Read a 64-bit GIC register at byte offset @reg. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(mips_gic_base + reg);
}

58
static inline unsigned long gic_read(unsigned int reg)
59
{
60 61 62 63 64 65 66 67
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

/* Write @val to a 32-bit GIC register at byte offset @reg. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, mips_gic_base + reg);
}

/* Write @val to a 64-bit GIC register at byte offset @reg. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, mips_gic_base + reg);
}

/*
 * Write a GIC register at the Coherence Manager's native width:
 * 64-bit accesses when the CM is 64-bit, 32-bit accesses otherwise.
 */
static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (mips_cm_is64)
		gic_write64(reg, (u64)val);
	else
		gic_write32(reg, (u32)val);
}

84 85 86 87 88 89 90 91
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

92
	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

/*
 * EIC mode: select which register shadow set the given interrupt vector
 * should use. Installed as board_bind_eic_interrupt in gic_basic_init().
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

/*
 * Fire an IPI by setting its shared interrupt via the WEDGE register.
 * @cpu is unused here: each IPI hwirq was already routed to its target CPU
 * when it was allocated (see gic_ipi_domain_alloc()).
 */
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

/*
 * Return the virq to use for the CP0 count/compare timer interrupt,
 * mapping it through the GIC when it is routable, otherwise falling back
 * to the raw CPU interrupt line.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

/*
 * Return the virq for the performance counter interrupt, or -1 when it is
 * neither routable through the GIC nor available as a distinct CPU IRQ.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

/*
 * Return the virq for the Fast Debug Channel interrupt, or -1 when it is
 * neither routable through the GIC nor present as a CPU IRQ.
 */
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

/*
 * Fill @gic_usm_res with the physical address range of the GIC's
 * user-mode visible section. Returns 0 on success, -1 if no GIC is present.
 */
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}

/*
 * Dispatch every pending, unmasked shared interrupt routed to the current
 * CPU. @chained selects the dispatch path: chained handlers must use
 * generic_handle_irq(), root dispatch goes through do_IRQ().
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	/* Copy pending & mask registers out at the CM's native access width */
	if (mips_cm_is64) {
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
		__ioread64_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	} else {
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
		__ioread32_copy(intrmask, addr_gic_mask(),
				DIV_ROUND_UP(gic_shared_intrs, 32));
	}

	/* Keep only interrupts that are unmasked AND routed to this CPU */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

/* Mask a shared interrupt via the reset-mask (RMASK) register. */
static void gic_mask_irq(struct irq_data *d)
{
	write_gic_rmask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
}

/* Unmask a shared interrupt via the set-mask (SMASK) register. */
static void gic_unmask_irq(struct irq_data *d)
{
	write_gic_smask(BIT(GIC_HWIRQ_TO_SHARED(d->hwirq)));
}

214 215
static void gic_ack_irq(struct irq_data *d)
{
216
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
217

218
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
219 220
}

/*
 * Configure the polarity/trigger/dual-edge registers of a shared interrupt
 * to match the requested IRQ trigger @type, then install the matching flow
 * handler & irq_chip (edge vs level). Always succeeds: unknown types fall
 * back to active-high level.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	/* Swap in the flow handler & chip matching the new trigger type */
	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
274 275
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
276
{
277
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
278 279 280 281
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

282
	cpumask_and(&tmp, cpumask, cpu_online_mask);
283
	if (cpumask_empty(&tmp))
284
		return -EINVAL;
285 286 287 288

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

289
	/* Re-route this IRQ */
290
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpumask_first(&tmp))));
291 292

	/* Update the pcpu_masks */
293
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
294
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
295
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
296

297
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
298 299
	spin_unlock_irqrestore(&gic_lock, flags);

300
	return IRQ_SET_MASK_OK_NOCOPY;
301 302 303
}
#endif

/* irq_chip for level-triggered shared interrupts. */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/* irq_chip for edge-triggered shared interrupts; also used for IPIs. */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

/*
 * Dispatch every pending, unmasked local interrupt on the current VP.
 * @chained selects the dispatch path, as in gic_handle_shared_int().
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	/* Keep only local interrupts that are both pending and unmasked */
	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

/* Mask a local interrupt on the current VP only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* Unmask a local interrupt on the current VP only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

/* irq_chip for local interrupts masked/unmasked on the current VP only. */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

/*
 * Mask a local interrupt on every VP, using the OTHER_ADDR indirection
 * register to address each VP's local register block in turn.
 * gic_lock also serialises use of the shared OTHER_ADDR register.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/*
 * Unmask a local interrupt on every VP, via the OTHER_ADDR indirection
 * register; the mirror of gic_mask_local_irq_all_vpes().
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* irq_chip for local interrupts that must be (un)masked on every VP. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

/* Root (EIC vector) entry point: handle local then shared interrupts. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
407

/* Chained-handler entry point: handle local then shared interrupts. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

/*
 * Put the GIC into a sane default state: all shared interrupts active-high,
 * level-triggered and masked; all routable local interrupts masked on every
 * VP. Also installs the EIC shadow-set binding hook.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(BIT(i));
	}

	/* Mask each VP's routable local interrupts via OTHER_ADDR */
	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

/*
 * Map a local interrupt: program each VP's MAP register to route @hw to
 * the chosen CPU pin. Returns -EPERM for non-routable interrupts and
 * -EINVAL for local interrupts beyond FDC.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int i;
	unsigned long flags;
	u32 val;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	/* Apply the routing on every VP */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

/*
 * Map a shared interrupt: route @hw to the GIC CPU pin and to VP @vpe,
 * then update the per-CPU dispatch masks so only @vpe handles it.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(vpe)));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

491
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

/*
 * irq_domain .map callback: attach the appropriate irq_chip & flow handler
 * for @hwirq (shared vs the various classes of local interrupt) and program
 * the hardware routing.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		/* Default-route new shared interrupts to CPU 0 */
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

/*
 * irq_domain .alloc callback: decode the fwspec into a hwirq and defer to
 * gic_irq_domain_map(). Only a single IRQ per call is mapped here.
 */
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

/* irq_domain .free callback: nothing to release for this domain. */
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

/* Operations for the main GIC interrupt domain (local + shared hwirqs). */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

/*
 * IPI domain .xlate callback.
 *
 * There's nothing to translate here. hwirq is dynamically allocated and
 * the irq type is always edge triggered.
 */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
607 608
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;
609

610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630
	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;
631

632
		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
633 634 635 636 637 638 639 640
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
641 642 643 644 645 646

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
647 648 649 650
	}

	return 0;
error:
651
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
652 653 654 655 656 657
	return ret;
}

/*
 * Free an IPI allocation: recover the base hwirq from the first virq's
 * irq_data and return the reserved vectors to the available pool.
 */
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

/*
 * Match callback for the IPI domain: match only IPI bus-token lookups,
 * and only when the requesting node (if any) is this domain's node.
 */
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		return (!node || to_of_node(d->fwnode) == node) &&
		       d->bus_token == bus_token;
	default:
		return 0;
	}
}

/* Operations for the hierarchical IPI domain layered on gic_irq_domain. */
static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

/*
 * Common GIC bring-up for both the legacy (gic_init) and device-tree
 * (gic_of_init) entry points: map the registers, discover the number of
 * shared interrupts and VPs, hook the dispatch path (EIC vector or chained
 * CPU IRQ), create the IRQ and IPI domains, reserve IPI vectors and put
 * the hardware into its default state. Panics if a domain cannot be added.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	mips_gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* Decode interrupt/VP counts from the GIC config register */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	/* Reserve IPI vectors: from DT if specified, else the last 2/VP */
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}
783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818

/* Legacy (non-DT) entry point: initialise the GIC with no OF node. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

/*
 * Device-tree probe: pick a free CPU vector (honouring the DT's
 * "mti,reserved-cpu-vectors"), find the GIC register range from the DT or
 * the Coherence Manager, enable the GIC region, then run common init.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);