/*
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while (0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	struct irq_chip chip;
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
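
/*
 * Illustrative example of the mapping (the real values are discovered
 * at boot by gic_get_cpumask()): gic_cpu_map[0] == 0x01 means logical
 * CPU0 owns GIC CPU interface 0, gic_cpu_map[1] == 0x02 means logical
 * CPU1 owns interface 1, and so on - one bit per interface, as used in
 * GIC_DIST_TARGET bytes and in SGI target lists.
 */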

static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

137
static inline void __iomem *gic_dist_base(struct irq_data *d)
138
{
139
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
140
	return gic_data_dist_base(gic_data);
141 142
}

143
static inline void __iomem *gic_cpu_base(struct irq_data *d)
144
{
145
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
146
	return gic_data_cpu_base(gic_data);
147 148
}

149
static inline unsigned int gic_irq(struct irq_data *d)
150
{
R
Rob Herring 已提交
151
	return d->hwirq;
152 153
}

154 155 156 157 158
static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
159 160
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
161
	 */
162
	return data != NULL;
163 164
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
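/*
 * For reference: the distributor's enable set/clear, pending set/clear
 * and active set/clear banks all use one bit per interrupt, 32 per
 * 32-bit word. Worked example for hwirq 42: the word lives at
 * offset + (42 / 32) * 4 = offset + 4, and the interrupt is bit
 * 42 % 32 = 10 - exactly what gic_poke_irq()/gic_peek_irq() compute.
 */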
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
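
/*
 * Background on the EOImode split above: with GIC_CPU_CTRL_EOImodeNS
 * set, writing GIC_CPU_EOI only performs the priority drop, and the
 * interrupt stays active until GIC_CPU_DEACTIVATE is written. This
 * lets the kernel keep an IRQ active while a guest owns it, and
 * deactivate it separately (or not at all, for forwarded IRQs).
 */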

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				      enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
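
/*
 * These callbacks back the generic irq_get_irqchip_state() and
 * irq_set_irqchip_state() helpers. A minimal, hypothetical use from
 * driver code might be:
 *
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 *
 * which makes the distributor latch the interrupt as if the wire had
 * been asserted.
 */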

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return gic_configure_irq(gicirq, type, base, NULL);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d))
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
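
/*
 * Note on GIC_DIST_TARGET (GICD_ITARGETSRn) layout, used by
 * gic_set_affinity() below: one byte per interrupt, four interrupts
 * per 32-bit word. Worked example for hwirq 45: the word is at
 * GIC_DIST_TARGET + (45 & ~3) = GIC_DIST_TARGET + 44, and the CPU
 * mask occupies bits [15:8], i.e. shift (45 % 4) * 8 = 8.
 */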

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1020)) {
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
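
/*
 * For reference, the GICv2 INTID space as handled above: 0-15 are SGIs
 * (used as IPIs), 16-31 are PPIs, 32-1019 are SPIs, and 1020-1023 are
 * special. An INTACK read of 1023 means "spurious, nothing pending",
 * which is what terminates the loop.
 */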

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}
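
/*
 * Usage sketch, assuming a (hypothetical) board where the output of a
 * secondary GIC is wired to an SPI of the primary one:
 *
 *	gic_init(1, irq_start, dist_base, cpu_base);
 *	gic_cascade_irq(1, linux_irq_of_that_spi);
 *
 * DT systems get this for free: gic_of_init() calls gic_cascade_irq()
 * itself when the node has an interrupt parent.
 */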

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
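
/*
 * Why the loop above works: GICD_ITARGETSR0-7 cover the banked
 * SGIs/PPIs and are read-only; each implemented byte reads back as the
 * CPU interface mask of the reading CPU. On CPU interface 2, for
 * example, a word reads as 0x04040404, and folding the bytes together
 * yields the single-byte mask 0x04.
 */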

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;

	if (static_key_true(&supports_deactivate))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}


static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		BUG_ON(cpu >= NR_GIC_CPU_IF);
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
					dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(&gic_data[gic_nr]);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_active);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);
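
	/*
	 * GIC_DIST_SOFTINT (GICD_SGIR) encoding, for reference: bits
	 * [23:16] hold the CPU target list and bits [3:0] the SGI
	 * number - hence "map << 16 | irq" below. For example, SGI 1
	 * aimed at CPU interfaces 0 and 2 is the single write
	 * 0x00050001.
	 */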
	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
 * is also updated.  Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;

	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		/* Get the interrupt number and add 16 to skip over SGIs */
		*hwirq = fwspec->param[1] + 16;

		/*
		 * For SPIs, we need to add 16 more to get the GIC irq
		 * ID number
		 */
		if (!fwspec->param[0])
			*hwirq += 16;

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}
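
/*
 * Worked example of the DT translation above: the three-cell specifier
 * <0 29 4> (GIC_SPI 29, IRQ_TYPE_LEVEL_HIGH) yields hwirq 29 + 16 + 16
 * = 61, i.e. SPI n maps to hwirq n + 32; <1 13 8> (GIC_PPI 13) yields
 * hwirq 13 + 16 = 29.
 */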

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};

static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct fwnode_handle *handle)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_check_cpu_features();

	gic = &gic_data[gic_nr];

	/* Initialize irq_chip */
	if (static_key_true(&supports_deactivate) && gic_nr == 0) {
		gic->chip = gic_eoimode1_chip;
	} else {
		gic->chip = gic_chip;
		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
	}

#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
		if (static_key_true(&supports_deactivate))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

void __init gic_init(unsigned int gic_nr, int irq_start,
		     void __iomem *dist_base, void __iomem *cpu_base)
{
	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_key_slow_dec(&supports_deactivate);
	__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K)
		return false;
	if (resource_size(&cpuif_res) == SZ_128K) {
		u32 val_low, val_high;

		/*
		 * Verify that we have the first 4kB of a GIC400
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		val_low = readl_relaxed(*base + GIC_CPU_IDENT);
		val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
		if ((val_low & 0xffff0fff) != 0x0202043B ||
		    val_low != val_high)
			return false;

		/*
		 * Move the base up by 60kB, so that we have a 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa",
			&cpuif_res.start);
	}

	return true;
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
		static_key_slow_dec(&supports_deactivate);

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
			 &node->fwnode);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
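
/*
 * A typical (hypothetical) device-tree node consumed by gic_of_init(),
 * with the distributor in reg entry 0 and the CPU interface in entry 1:
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,
 *		      <0x2c002000 0x2000>;
 *	};
 */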
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);

#endif

#ifdef CONFIG_ACPI
static phys_addr_t cpu_phy_base __initdata;


static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 register in ACPI spec.
	 * All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
		return -EINVAL;

	cpu_phy_base = gic_cpu_base;
	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)

static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	void __iomem *cpu_base, *dist_base;
	struct fwnode_handle *domain_handle;
	int count;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		iounmap(cpu_base);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	domain_handle = irq_domain_alloc_fwnode(dist_base);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		iounmap(cpu_base);
		iounmap(dist_base);
		return -ENOMEM;
	}

	__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif