irq-mips-gic.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
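/*
 * Read the 64-bit GIC shared counter. When the CM interface is only 32 bits
 * wide the counter is read as two halves, re-reading the high word until it
 * is stable so that a carry between the two accesses cannot be observed.
 */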
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}

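/*
 * Program the compare register of another VPE by pointing the local "other"
 * register block at @cpu first; local interrupts are disabled so the
 * redirection window is not re-targeted mid-update.
 */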
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

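/* Map an EIC interrupt vector to the shadow register set it should use. */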
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}

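/*
 * Dispatch the shared interrupts that are pending, unmasked and routed to
 * this CPU. The pending and mask registers are read a word at a time,
 * falling back to two 32-bit accesses per word on 64-bit kernels without a
 * 64-bit CM interface.
 */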
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
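/*
 * Route a shared interrupt to a single CPU: the first online CPU in
 * @cpumask. The per-CPU dispatch masks are updated to match.
 */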
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

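/* Dispatch the local (per-VPE) interrupts that are pending and unmasked. */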
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

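/*
 * Set up a local interrupt: pick the irq_chip and flow handler appropriate
 * for the hwirq, then route it to the chosen CPU pin on every VPE via the
 * "other" register block.
 */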
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, vpe);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw, 0);
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

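/*
 * Allocate hwirqs in the root domain. GIC_DEVICE allocations only need to
 * avoid the shared interrupts reserved for IPIs; GIC_IPI allocations carve
 * a block out of that reserved range and map one hwirq per CPU in the
 * supplied mask.
 */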
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that it doesn't conflict with an IPI irq */
		if (test_bit(spec->hwirq, ipi_resrv))
			return -EBUSY;
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs) {
			return -ENOMEM;
		}

		/* check that we have enough space */
		for (i = base_hwirq; i < nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_edge_irq_controller,
							    NULL);
			if (ret)
				goto error;

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return to_of_node(d->fwnode) == node && is_ipi;
		break;
	default:
		return 0;
	}
}

static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

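/*
 * Probe the GIC config register for the number of shared interrupts and
 * VPEs, pick the CPU pins used for dispatch, create the IRQ and IPI
 * domains and reserve the last 2 * NR_CPUS shared interrupts for IPIs.
 */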
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	/* Make the last 2 * NR_CPUS available for IPIs */
	bitmap_set(ipi_resrv, gic_shared_intrs - 2 * NR_CPUS, 2 * NR_CPUS);

	gic_basic_init();

	gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);