irq-mips-gic.c 25.6 KB
Newer Older
1 2 3 4 5 6 7 8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
9
#include <linux/bitmap.h>
10
#include <linux/clocksource.h>
11
#include <linux/init.h>
12
#include <linux/interrupt.h>
13
#include <linux/irq.h>
14
#include <linux/irqchip.h>
15
#include <linux/irqchip/mips-gic.h>
16
#include <linux/of_address.h>
17
#include <linux/sched.h>
18
#include <linux/smp.h>
19

20
#include <asm/mips-cm.h>
S
Steven J. Hill 已提交
21 22
#include <asm/setup.h>
#include <asm/traps.h>
23

24 25
#include <dt-bindings/interrupt-controller/mips-gic.h>

26
/* Non-zero once a GIC has been detected; read by platform code. */
unsigned int gic_present;

/* Per-CPU bitmap of shared interrupts currently routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

/* Physical base address of the GIC, kept for gic_get_usm_range(). */
static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
/* Protects routing registers and pcpu_masks updates. */
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;	/* number of shared interrupt sources */
static int gic_vpes;		/* number of VP(E)s served by the GIC */
static unsigned int gic_cpu_pin;	/* CPU pin shared interrupts use */
static unsigned int timer_cpu_pin;	/* CPU pin the local timer uses */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);	/* vectors reserved for IPIs */
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);	/* reserved and not yet allocated */

static void __gic_irq_dispatch(void);

49
/* Read a 32-bit GIC register; @reg is a byte offset from gic_base. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

54
/* Read a 64-bit GIC register; @reg is a byte offset from gic_base. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

59
/* Native-width GIC read: 64-bit when the CM is 64-bit, else 32-bit. */
static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

/* Write a 32-bit GIC register; @reg is a byte offset from gic_base. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

/* Write a 64-bit GIC register; @reg is a byte offset from gic_base. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

/* Native-width GIC write: 64-bit when the CM is 64-bit, else 32-bit. */
static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

/*
 * Read-modify-write a GIC register: clear the bits in @mask, then set the
 * bits in @val.  Not atomic; callers serialise via gic_lock where needed.
 */
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Set the polarity (GIC_POL_POS/GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

/* Set level vs edge triggering (GIC_TRIG_*) for shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

/* Enable/disable dual-edge triggering for shared interrupt @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

/* Route shared interrupt @intr to VP(E) @vpe (a hardware VP id). */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

142
#ifdef CONFIG_CLKSRC_MIPS_GIC
143
/* Read the 64-bit GIC shared counter, coping with 32-bit-only access. */
u64 gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	/*
	 * 32-bit access: re-read the high word until it is stable across
	 * the low-word read, so a carry between reads can't be missed.
	 */
	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}
158

159 160 161 162
/*
 * Return the GIC counter width in bits, decoded from the COUNTBITS field
 * of the shared config register (32 + 4 * field).
 */
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

170
/* Program this VPE's local compare register (one 64-bit or two 32-bit writes). */
void gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}

182
/*
 * Program the compare register of another CPU via the VPE-other window.
 * IRQs are disabled so the OTHER_ADDR selection and the writes that follow
 * are not interleaved with another user of the window on this CPU.
 */
void gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

202
/* Read this VPE's local compare register (64-bit or hi/lo 32-bit halves). */
u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}
214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234

/* Start the shared counter by clearing the COUNTSTOP config bit. */
void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

/* Stop the shared counter by setting the COUNTSTOP config bit. */
void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

235 236
#endif

237 238 239 240 241 242 243 244
/* Return the executing VP's number from the local VP identification register. */
unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}

245 246 247 248 249 250 251 252
/*
 * Report whether local interrupt @intr can be routed through the GIC on
 * this VPE, per the routability bits in the local control register.
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

269
/*
 * EIC-mode hook (installed as board_bind_eic_interrupt): select the register
 * shadow set used when vector @irq is taken.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

279
/*
 * Raise an IPI by setting the wedge bit of the shared interrupt mapped to
 * @cpu; @cpu itself is implicit in which hwirq the caller allocated.
 */
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

286 287 288 289 290 291 292 293 294 295 296
/*
 * Pick the Linux IRQ for the CP0 count/compare timer: the GIC mapping when
 * the local timer interrupt is routable, otherwise the legacy CPU IRQ line.
 */
int gic_get_c0_compare_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));

	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

/*
 * Pick the Linux IRQ for the performance counter overflow interrupt.
 * Returns the GIC mapping when routable; otherwise falls back to the CPU
 * IRQ line, or -1 when the counter shares an IRQ with the timer.
 */
int gic_get_c0_perfcount_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));

	/* Is the performance counter shared with the timer? */
	if (cp0_perfcount_irq < 0)
		return -1;

	return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
}

306 307 308 309 310 311 312 313 314 315 316 317 318
/*
 * Pick the Linux IRQ for the Fast Debug Channel.  Returns the GIC mapping
 * when routable; otherwise the CPU IRQ line, or -1 when no FDC IRQ exists.
 */
int gic_get_c0_fdc_int(void)
{
	if (gic_local_irq_is_routable(GIC_LOCAL_INT_FDC))
		return irq_create_mapping(gic_irq_domain,
					  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));

	/* Is the FDC IRQ even present? */
	if (cp0_fdc_irq < 0)
		return -1;

	return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}

319 320 321 322 323 324 325 326 327 328 329
/*
 * Fill @gic_usm_res with the physical range of the GIC's user-mode visible
 * section.  Returns 0, or -1 when no GIC is present.
 */
int gic_get_usm_range(struct resource *gic_usm_res)
{
	unsigned long usm_base;

	if (!gic_present)
		return -1;

	usm_base = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->start = usm_base;
	gic_usm_res->end = usm_base + USM_VISIBLE_SECTION_SIZE - 1;

	return 0;
}

330
/*
 * Dispatch every pending, unmasked shared interrupt routed to this CPU.
 * @chained: true when called from a chained handler (use generic_handle_irq),
 *           false from the low-level vector (use do_IRQ).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Gather pending & mask state one register word at a time. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		/*
		 * 64-bit kernel on a 32-bit CM: each long in the bitmap is
		 * built from two consecutive 32-bit registers.
		 */
		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Keep only interrupts that are unmasked and routed to this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

372
/* irq_chip .irq_mask for shared interrupts. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

377
/* irq_chip .irq_unmask for shared interrupts. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

382 383
/* irq_chip .irq_ack: clear the wedge bit for an edge shared interrupt. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

389 390
/*
 * irq_chip .irq_set_type: program polarity/trigger/dual-edge for a shared
 * interrupt and switch between the level and edge chips/handlers to match.
 * Unrecognised types fall through to active-high level.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need the ack-based chip & flow handler. */
	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
442 443
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
444
{
445
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
446 447 448 449
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

450
	cpumask_and(&tmp, cpumask, cpu_online_mask);
451
	if (cpumask_empty(&tmp))
452
		return -EINVAL;
453 454 455 456

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

457
	/* Re-route this IRQ */
458
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
459 460

	/* Update the pcpu_masks */
461
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
462
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
463
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
464

465
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
466 467
	spin_unlock_irqrestore(&gic_lock, flags);

468
	return IRQ_SET_MASK_OK_NOCOPY;
469 470 471
}
#endif

472 473 474 475 476 477 478 479 480 481 482
/* Chip used for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/* Chip used for edge-triggered shared interrupts and IPIs (wedge ack). */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

494
/*
 * Dispatch every pending, unmasked local (per-VPE) interrupt.
 * @chained selects generic_handle_irq() vs do_IRQ(), as for shared ints.
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

/* Mask a local interrupt on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* Unmask a local interrupt on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

/* Chip for local interrupts managed per-CPU (percpu_devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

/* Mask a local interrupt on every VPE, via the VPE-other window. */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	/* gic_lock serialises use of the shared VPE-other window. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Unmask a local interrupt on every VPE, via the VPE-other window. */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	/* gic_lock serialises use of the shared VPE-other window. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Chip for local interrupts that must be (un)masked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

570
/* Low-level vector entry (EIC mode): handle local then shared interrupts. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
575

576
/* Chained-handler entry: handle local then shared interrupts. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

582
/*
 * Put the GIC into a sane initial state: all shared interrupts active-high
 * level-triggered and masked, all routable local interrupts masked on every
 * VPE, and the EIC shadow-set hook installed.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

608 609
/*
 * Map a local hwirq: program, on every VPE, the per-interrupt map register
 * that routes it to a CPU pin.  Returns -EPERM if the interrupt is not
 * routable on this VPE, -EINVAL for an unknown local interrupt.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/* gic_lock also serialises use of the VPE-other window below. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

/*
 * Map a shared hwirq: route it to the common CPU pin and to @vpe, and mark
 * it in @vpe's per-CPU dispatch bitmap (clearing it everywhere else).
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

684
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

703 704
/*
 * Domain .map callback: attach the right chip/flow handler for a hwirq and
 * program its routing.  Shared hwirqs get the level chip by default (the
 * type may later switch them to the edge chip); local hwirqs get either the
 * all-VPEs chip (timer/perfctr/FDC legacy users) or the percpu-devid chip.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		/* Initially route shared interrupts to VPE 0. */
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

755 756 757 758 759 760 761 762 763 764 765 766 767 768
/*
 * Domain .alloc callback: decode the fwspec into a linear hwirq and map it.
 * Only the first IRQ of the range needs mapping here; callers allocate one
 * at a time through this domain.
 */
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	hwirq = (fwspec->param[0] == GIC_SHARED)
			? GIC_SHARED_TO_HWIRQ(fwspec->param[1])
			: GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

769 770
/* Domain .free callback: nothing to undo for plain GIC interrupts. */
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

774 775 776 777
/* Main GIC domain: handles both DT translation and hierarchy allocation. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated
	 * and the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
800 801
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;
802

803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823
	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;
824

825
		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
826 827 828 829 830 831 832 833
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
834 835 836 837 838 839

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
840 841 842 843
	}

	return 0;
error:
844
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
845 846 847 848 849 850
	return ret;
}

/*
 * Free a run of IPI vectors: recover the base hwirq from the first virq's
 * irq_data and return the run to the available pool.
 */
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

/*
 * Domain .match callback: claim IPI-bus requests that target either no
 * particular node or this GIC's node.  All other bus tokens are declined.
 * (An unreachable `break` after the return has been dropped.)
 */
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

/* IPI domain: dynamic hwirq allocation from the reserved vector pool. */
static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

884 885 886 887
/*
 * Common GIC bring-up for both the legacy and DT probe paths: map the
 * registers, discover the number of shared interrupts and VPEs, hook the
 * CPU vector (EIC or chained), create the IRQ and IPI domains, reserve IPI
 * vectors and apply default routing.  Panics on domain creation failure.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* NUMINTRS encodes (shared interrupts / 8) - 1. */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* NUMVPES encodes the VP(E) count - 1. */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	/* DT may reserve a specific IPI vector range; otherwise take the
	 * last 2 vectors per VPE. */
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}
978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033

/* Legacy (non-DT) entry point for platform code. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

/*
 * DT probe: pick the first CPU vector not reserved by the DT, locate the
 * GIC registers (from DT "reg" or, failing that, from the CM GCR), enable
 * the GIC in the CM, and hand over to __gic_init().
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);