irq-mips-gic.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_dev_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
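/*
 * Read the 64-bit GIC counter. When the CM only provides 32-bit accesses the
 * counter is read as two halves; the high word is re-read until it is stable
 * so that a carry between the two accesses cannot yield a torn value.
 */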
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}

void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}

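/*
 * Dispatch shared (external) interrupts: read the shared pending and mask
 * register banks, AND them with this CPU's pcpu_mask, and hand each remaining
 * bit to its virq. Chained dispatch uses generic_handle_irq(), direct
 * dispatch from the CPU vector uses do_IRQ().
 */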
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
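/*
 * The GIC routes each shared interrupt to a single VP, so an affinity change
 * re-routes the interrupt to the first online CPU in the requested mask and
 * updates pcpu_masks so only that CPU will dispatch it.
 */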
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

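/*
 * Dispatch local (per-VP) interrupts: read this VP's local pending and mask
 * registers and hand each enabled pending source to its virq.
 */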
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw, 0);
}

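/*
 * GIC_DEVICE allocations map a single shared hwirq, rejecting vectors that
 * are reserved for IPIs. GIC_IPI allocations carve consecutive hwirqs out of
 * ipi_resrv, map one to each CPU in the supplied mask using the edge chip,
 * and report the base hwirq back through spec->hwirq.
 */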
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that it doesn't conflict with an IPI irq */
		if (test_bit(spec->hwirq, ipi_resrv))
			return -EBUSY;

		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);

		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						     &gic_level_irq_controller,
						     NULL);
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs) {
			return -ENOMEM;
		}

		/* check that we have enough space */
		for (i = base_hwirq; i < nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_edge_irq_controller,
							    NULL);
			if (ret)
				goto error;

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}

int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	/* this domain shouldn't be accessed directly */
	return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};

static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
		.hwirq = fwspec->param[1],
	};
	int i, ret;
	bool is_shared = fwspec->param[0] == GIC_SHARED;

	if (is_shared) {
		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
		if (ret)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_hw_number_t hwirq;

		if (is_shared)
			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
		else
			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (ret)
			goto error;
	}

	return 0;

error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no real allocation is done for dev irqs, so no need to free anything */
	return;
}

static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
};
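/*
 * The IPI domain sits on top of gic_irq_domain. IPI hwirqs are allocated
 * dynamically from the vectors reserved in ipi_resrv, one per target CPU,
 * and are always configured as rising-edge interrupts.
 */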

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
		break;
	default:
		return 0;
	}
}

static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

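/*
 * Probe GIC_SH_CONFIG for the number of shared interrupts and VPs, set up
 * either EIC or chained CPU-vector dispatch, create the core, device and IPI
 * IRQ domains, and reserve shared vectors for IPIs (from the DT property
 * "mti,reserved-ipi-vectors" or, by default, the last 2 * gic_vpes vectors).
 */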
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_dev_domain_ops, NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");
	gic_dev_domain->name = "mips-gic-dev";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	gic_basic_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
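/*
 * Device-tree probe: pick the first CPU hardware vector not listed in
 * "mti,reserved-cpu-vectors", take the register base from the "reg" property
 * or fall back to the CM's GCR_GIC_BASE, enable the GIC in the CM when one is
 * present, then hand over to __gic_init().
 */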

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);