/*
 *  arch/powerpc/kernel/mpic.c
 *
 *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
 *  with various broken implementations of this HW.
 *
 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9
 *  Copyright 2010-2011 Freescale Semiconductor, Inc.
10 11 12 13 14 15 16
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive
 *  for more details.
 */

#undef DEBUG
17 18 19
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW
20 21 22 23 24 25 26 27 28 29

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
30
#include <linux/slab.h>
31
#include <linux/syscore_ops.h>
32 33 34 35 36 37 38 39 40 41

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#include "mpic.h"

44 45 46 47 48 49 50 51
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static struct mpic *mpics;
static struct mpic *mpic_primary;
52
static DEFINE_RAW_SPINLOCK(mpic_lock);
53

54
#ifdef CONFIG_PPC32	/* XXX for now */
55 56 57 58 59
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif
60
#endif
61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88
#ifdef CONFIG_MPIC_WEIRD
static u32 mpic_infos[][MPIC_IDX_END] = {
	[0] = {	/* Original OpenPIC compatible MPIC */
		MPIC_GREG_BASE,
		MPIC_GREG_FEATURE_0,
		MPIC_GREG_GLOBAL_CONF_0,
		MPIC_GREG_VENDOR_ID,
		MPIC_GREG_IPI_VECTOR_PRI_0,
		MPIC_GREG_IPI_STRIDE,
		MPIC_GREG_SPURIOUS,
		MPIC_GREG_TIMER_FREQ,

		MPIC_TIMER_BASE,
		MPIC_TIMER_STRIDE,
		MPIC_TIMER_CURRENT_CNT,
		MPIC_TIMER_BASE_CNT,
		MPIC_TIMER_VECTOR_PRI,
		MPIC_TIMER_DESTINATION,

		MPIC_CPU_BASE,
		MPIC_CPU_STRIDE,
		MPIC_CPU_IPI_DISPATCH_0,
		MPIC_CPU_IPI_DISPATCH_STRIDE,
		MPIC_CPU_CURRENT_TASK_PRI,
		MPIC_CPU_WHOAMI,
		MPIC_CPU_INTACK,
		MPIC_CPU_EOI,
89
		MPIC_CPU_MCACK,
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127

		MPIC_IRQ_BASE,
		MPIC_IRQ_STRIDE,
		MPIC_IRQ_VECTOR_PRI,
		MPIC_VECPRI_VECTOR_MASK,
		MPIC_VECPRI_POLARITY_POSITIVE,
		MPIC_VECPRI_POLARITY_NEGATIVE,
		MPIC_VECPRI_SENSE_LEVEL,
		MPIC_VECPRI_SENSE_EDGE,
		MPIC_VECPRI_POLARITY_MASK,
		MPIC_VECPRI_SENSE_MASK,
		MPIC_IRQ_DESTINATION
	},
	[1] = {	/* Tsi108/109 PIC */
		TSI108_GREG_BASE,
		TSI108_GREG_FEATURE_0,
		TSI108_GREG_GLOBAL_CONF_0,
		TSI108_GREG_VENDOR_ID,
		TSI108_GREG_IPI_VECTOR_PRI_0,
		TSI108_GREG_IPI_STRIDE,
		TSI108_GREG_SPURIOUS,
		TSI108_GREG_TIMER_FREQ,

		TSI108_TIMER_BASE,
		TSI108_TIMER_STRIDE,
		TSI108_TIMER_CURRENT_CNT,
		TSI108_TIMER_BASE_CNT,
		TSI108_TIMER_VECTOR_PRI,
		TSI108_TIMER_DESTINATION,

		TSI108_CPU_BASE,
		TSI108_CPU_STRIDE,
		TSI108_CPU_IPI_DISPATCH_0,
		TSI108_CPU_IPI_DISPATCH_STRIDE,
		TSI108_CPU_CURRENT_TASK_PRI,
		TSI108_CPU_WHOAMI,
		TSI108_CPU_INTACK,
		TSI108_CPU_EOI,
128
		TSI108_CPU_MCACK,
129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151

		TSI108_IRQ_BASE,
		TSI108_IRQ_STRIDE,
		TSI108_IRQ_VECTOR_PRI,
		TSI108_VECPRI_VECTOR_MASK,
		TSI108_VECPRI_POLARITY_POSITIVE,
		TSI108_VECPRI_POLARITY_NEGATIVE,
		TSI108_VECPRI_SENSE_LEVEL,
		TSI108_VECPRI_SENSE_EDGE,
		TSI108_VECPRI_POLARITY_MASK,
		TSI108_VECPRI_SENSE_MASK,
		TSI108_IRQ_DESTINATION
	},
};

#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]

#else /* CONFIG_MPIC_WEIRD */

#define MPIC_INFO(name) MPIC_##name

#endif /* CONFIG_MPIC_WEIRD */

152 153 154 155 156 157 158 159 160 161
/*
 * Physical CPU id used to address per-CPU register banks: the real
 * hard SMP processor id on the primary controller, CPU 0 otherwise.
 */
static inline unsigned int mpic_processor_id(struct mpic *mpic)
{
	return (mpic->flags & MPIC_PRIMARY) ? hard_smp_processor_id() : 0;
}

162 163 164 165 166
/*
 * Register accessor functions
 */


167 168 169
/*
 * Read a 32-bit register from bank @rb using the controller's access
 * method: DCR (where configured), big-endian MMIO, or little-endian
 * MMIO (the default).  @reg is a byte offset; MMIO banks store u32
 * cells, hence the >> 2 index conversion.
 */
static inline u32 _mpic_read(enum mpic_reg_type type,
			     struct mpic_reg_bank *rb,
			     unsigned int reg)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		return dcr_read(rb->dhost, reg);
#endif
	case mpic_access_mmio_be:
		return in_be32(rb->base + (reg >> 2));
	case mpic_access_mmio_le:
	default:
		return in_le32(rb->base + (reg >> 2));
	}
}

184 185 186
/*
 * Write @value to a 32-bit register in bank @rb, mirroring the access
 * methods handled by _mpic_read() above.
 */
static inline void _mpic_write(enum mpic_reg_type type,
			       struct mpic_reg_bank *rb,
			       unsigned int reg, u32 value)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		dcr_write(rb->dhost, reg, value);
		break;
#endif
	case mpic_access_mmio_be:
		out_be32(rb->base + (reg >> 2), value);
		break;
	case mpic_access_mmio_le:
	default:
		out_le32(rb->base + (reg >> 2), value);
		break;
	}
}

/*
 * Read the vector/priority register of IPI number @ipi.
 * On controllers flagged MPIC_BROKEN_IPI the IPI registers read back
 * with reversed endianness, so a little-endian access is forced to
 * big-endian for the read (see mpic_test_broken_ipi()).
 */
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
	enum mpic_reg_type type = mpic->reg_type;
	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));

	if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
		type = mpic_access_mmio_be;
	return _mpic_read(type, &mpic->gregs, offset);
}

/* Write the vector/priority register of IPI number @ipi (writes are
 * not affected by the MPIC_BROKEN_IPI read quirk). */
static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
	unsigned int reg = MPIC_INFO(GREG_IPI_VECTOR_PRI_0);

	reg += ipi * MPIC_INFO(GREG_IPI_STRIDE);
	_mpic_write(mpic->reg_type, &mpic->gregs, reg, value);
}

223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
/*
 * Read the vector/priority register of global timer @tm.
 * Timers 4-7 live in a second group of four whose registers sit 0x1000
 * bytes (0x1000/4 u32 cells) above the first group; "tm & 3" selects
 * the timer within its group.
 */
static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
{
	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE));

	if (tm >= 4)
		offset += 0x1000 / 4;

	return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
}

/*
 * Write the vector/priority register of global timer @tm; same group
 * addressing as _mpic_tm_read() above.
 */
static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
{
	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE));

	if (tm >= 4)
		offset += 0x1000 / 4;

	_mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
}

245 246
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
247
	unsigned int cpu = mpic_processor_id(mpic);
248

249
	return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
250 251 252 253
}

/* Write a per-CPU register of the CPU this code is running on. */
static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
	_mpic_write(mpic->reg_type,
		    &mpic->cpuregs[mpic_processor_id(mpic)], reg, value);
}

/*
 * Read an interrupt-source register.  Sources are grouped into ISUs
 * (interrupt source units); isu_shift/isu_mask split the global source
 * number into (unit, index-within-unit).
 */
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;
	unsigned int	val;

	val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
			 reg + (idx * MPIC_INFO(IRQ_STRIDE)));
#ifdef CONFIG_MPIC_BROKEN_REGREAD
	/* Some controllers return garbage on reads of vecpri (reg 0):
	 * only the mask and activity bits are trustworthy, the rest is
	 * reconstructed from the shadow kept by _mpic_irq_write(). */
	if (reg == 0)
		val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
			mpic->isu_reg0_shadow[src_no];
#endif
	return val;
}

/*
 * Write an interrupt-source register; see _mpic_irq_read() for the
 * ISU addressing scheme.
 */
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
				   unsigned int reg, u32 value)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;

	_mpic_write(mpic->reg_type, &mpic->isus[isu],
		    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);

#ifdef CONFIG_MPIC_BROKEN_REGREAD
	/* Shadow everything except mask/activity so broken reads of
	 * vecpri (reg 0) can be reconstructed in _mpic_irq_read(). */
	if (reg == 0)
		mpic->isu_reg0_shadow[src_no] =
			value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
#endif
}

291 292
/*
 * Shorthand accessors.  All of these expect a local variable named
 * "mpic" (struct mpic *) to be in scope at the point of use.
 */
#define mpic_read(b,r)		_mpic_read(mpic->reg_type,&(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_tm_read(i)		_mpic_tm_read(mpic,(i))
#define mpic_tm_write(i,v)	_mpic_tm_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))


/*
 * Low level utility functions
 */


308
/* Map an MMIO register bank at @phys_addr + @offset; boot-fatal on
 * failure since the controller is unusable without its registers. */
static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
			   struct mpic_reg_bank *rb, unsigned int offset,
			   unsigned int size)
{
	rb->base = ioremap(phys_addr + offset, size);
	BUG_ON(rb->base == NULL);
}

#ifdef CONFIG_PPC_DCR
317 318
static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
			  struct mpic_reg_bank *rb,
319 320
			  unsigned int offset, unsigned int size)
{
321 322
	const u32 *dbasep;

323
	dbasep = of_get_property(node, "dcr-reg", NULL);
324

325
	rb->dhost = dcr_map(node, *dbasep + offset, size);
326 327 328
	BUG_ON(!DCR_MAP_OK(rb->dhost));
}

329 330 331
/* Map a register bank via MMIO, or via DCR when the controller is
 * flagged MPIC_USES_DCR. */
static inline void mpic_map(struct mpic *mpic, struct device_node *node,
			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
			    unsigned int offset, unsigned int size)
{
	if (!(mpic->flags & MPIC_USES_DCR))
		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
	else
		_mpic_map_dcr(mpic, node, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
339
#define mpic_map(m,n,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
340 341 342
#endif /* !CONFIG_PPC_DCR */


343 344 345 346 347 348 349 350

/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers: write a known pattern through the normal
 * accessor and see whether it reads back byte-swapped.
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}

360
#ifdef CONFIG_MPIC_U3_HT_IRQS
361 362 363 364

/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 * to force the edge setting on the MPIC and do the ack workaround.
 */
365
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
366
{
367
	if (source >= 128 || !mpic->fixups)
368
		return 0;
369
	return mpic->fixups[source].base != NULL;
370 371
}

372

373
/*
 * Re-trigger/ack an HT interrupt at the end of handling.  Apple parts
 * expose a simple per-irq ack register block (applebase); standard HT
 * parts require a select/write sequence through the fixup config
 * registers, serialized by fixup_lock.
 */
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		raw_spin_lock(&mpic->fixup_lock);
		/* select the irq's fixup register, then rewrite its data */
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		raw_spin_unlock(&mpic->fixup_lock);
	}
}

389
/*
 * Enable and configure the HT-side state of an interrupt source at
 * startup.  Bit 0 of the fixup register is the mask, bits in 0x22
 * select level triggering; only sources that actually have an HT
 * fixup (fixup->base set) are touched.
 */
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      bool level)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(0x%x) index: %d\n",
	    source, fixup->index);
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);
	if (level)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp | 1;
#endif
}

418
/*
 * Mask the HT-side state of an interrupt source on shutdown (sets the
 * mask bit, bit 0, in the fixup register).  Counterpart of
 * mpic_startup_ht_interrupt() above.
 */
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(0x%x)\n", source);

	/* Disable */
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp & ~1;
#endif
}
443

444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472
#ifdef CONFIG_PCI_MSI
/*
 * Walk the device's PCI capability list looking for an HT MSI mapping
 * capability; if found (and not already enabled), report it and turn
 * the mapping on so MSIs are translated to interrupts.
 */
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn)
{
	u8 __iomem *base;
	u8 pos, flags;
	u64 addr = 0;

	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
				break;
		}
	}

	if (pos == 0)
		return;

	base = devbase + pos;

	/* Non-fixed mappings carry an explicit 64-bit MSI address */
	flags = readb(base + HT_MSI_FLAGS);
	if (!(flags & HT_MSI_FLAGS_FIXED)) {
		addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
		addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
	}

	printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
		PCI_SLOT(devfn), PCI_FUNC(devfn),
		flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);

	if (!(flags & HT_MSI_FLAGS_ENABLE))
		writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
}
#else
/* No PCI MSI support configured: nothing to scan for. */
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn)
{
	return;
}
#endif

488 489
/*
 * Scan one PCI function for an HT IRQ capability and, if present,
 * record a fixup entry (config base, index, saved data) for each irq
 * it sources so the mask/ack workarounds can be applied later.
 * @vdid is the raw vendor/device dword read by the caller.
 */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	/* Find the HT IRQ capability in the capability list, if any */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
				break;
		}
	}
	if (pos == 0)
		return;

	/* Select register 0x01 to read the number of interrupts */
	base = devbase + pos;
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		/* Even register selects the irq's definition word */
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		/* Odd register holds the data re-written on ack/EOI */
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}
536

537
/*
 * Set up the U3/U4 HyperTransport interrupt workarounds: allocate the
 * per-source fixup table, map the HT host config space, and scan every
 * device function on the primary bus for HT IRQ and MSI capabilities.
 */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array (one slot per possible HT source) */
	mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
	BUG_ON(mpic->fixups == NULL);

	/* Init spinlock */
	raw_spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);
		mpic_scan_ht_msi(mpic, devbase, devfn);

	next:
		/* next device, if function 0 of a single-function device */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}

587
#else /* CONFIG_MPIC_U3_HT_IRQS */

/* No U3/U4 HT support: no source ever has an HT fixup */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	return 0;
}

/* No U3/U4 HT support: nothing to scan */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
}

#endif /* CONFIG_MPIC_U3_HT_IRQS */
599

600
#ifdef CONFIG_SMP
/*
 * Pick an online CPU to route an interrupt to and return its *hard*
 * (physical) processor id.  "All CPUs" affinity is spread round-robin
 * across online CPUs; any other mask uses its first online member.
 */
static int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover = 0;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		/* NOTE(review): this goto jumps from the else branch back
		 * into the if branch's round-robin path when the requested
		 * mask has no online CPU — legal C, but fragile; a future
		 * cleanup could hoist the round-robin into a helper. */
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
/* UP: the only choice is the CPU we are on */
static int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
635 636

/* Find an mpic associated with a given linux interrupt */
637
static struct mpic *mpic_find(unsigned int irq)
638
{
639 640
	if (irq < NUM_ISA_INTERRUPTS)
		return NULL;
641

642
	return irq_get_chip_data(irq);
643
}
644

645 646 647
/* Determine if the linux irq is an IPI */
static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
{
648
	unsigned int src = virq_to_hw(irq);
649

650
	return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
651 652
}

653 654 655 656 657 658 659
/* Determine if the linux irq is a global timer (its hw vector falls
 * inside the controller's eight reserved timer vectors). */
static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
{
	unsigned int hw = virq_to_hw(irq);

	return hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7];
}
660

661 662 663 664 665 666
/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
	u32 mask = 0;
	int i;

	for (i = 0; i < min(32, NR_CPUS); i++) {
		if (cpumask & (1u << i))
			mask |= 1u << get_hard_smp_processor_id(i);
	}
	return mask;
}

#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
#endif

/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

/* Get the mpic structure from the irq data */
static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}

/* Send an EOI; the WHOAMI read-back pushes the posted EOI write out to
 * the controller before we proceed. */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
	(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}

/*
 * Linux descriptor level callbacks
 */


704
/*
 * Unmask an interrupt source by clearing the mask bit in its
 * vector/priority register, then poll until the controller reflects
 * the change (bounded by a loop counter so a dead controller cannot
 * hang the CPU).
 */
void mpic_unmask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
		       ~MPIC_VECPRI_MASK);
	/* make sure the unmask gets to the controller before we return */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}

725
/*
 * Mask an interrupt source by setting the mask bit in its
 * vector/priority register, then poll (bounded) until the controller
 * reflects the change.
 */
void mpic_mask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
		       MPIC_VECPRI_MASK);

	/* make sure the mask gets to the controller before we return */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}

747
/* Signal end-of-interrupt for a normal source. */
void mpic_end_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	mpic_eoi(mpic);
}

762
#ifdef CONFIG_MPIC_U3_HT_IRQS
763

764
/* Unmask an HT-routed source; level-triggered HT sources additionally
 * need the HT-side ack sequence to re-arm them. */
static void mpic_unmask_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
}

775
/* Startup for an HT-routed source: unmask on the MPIC side, then
 * enable/configure the HT side.  Always returns 0 (no pending irq). */
static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);
	mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));

	return 0;
}

786
/* Shutdown for an HT-routed source: disable the HT side first, then
 * mask on the MPIC side. */
static void mpic_shutdown_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_shutdown_ht_interrupt(mpic, src);
	mpic_mask_irq(d);
}

795
/* End-of-interrupt for an HT-routed source: run the HT ack sequence
 * for level-triggered sources, then EOI the MPIC. */
static void mpic_end_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
	mpic_eoi(mpic);
}
812
#endif /* !CONFIG_MPIC_U3_HT_IRQS */
813

814 815
#ifdef CONFIG_SMP

816
/* Unmask an IPI by clearing the mask bit in its vector/priority
 * register; @src is the IPI number (0-3), not the hw vector. */
static void mpic_unmask_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}

825
/* Deliberate no-op: masking an IPI would break cross-CPU signalling. */
static void mpic_mask_ipi(struct irq_data *d)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}

830
/* End-of-interrupt for an IPI. */
static void mpic_end_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with IRQF_DISABLED as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */

846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864
/*
 * Unmask a global-timer interrupt; the read-back flushes the write.
 *
 * Fix: the DBG() call referenced an undeclared variable 'irq' — it
 * only compiled because DBG() expands to nothing unless DEBUG is
 * defined, so enabling DEBUG broke the build.  Use d->irq, matching
 * every other handler in this file.
 */
static void mpic_unmask_tm(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];

	DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
	mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
	mpic_tm_read(src);
}

/* Mask a global-timer interrupt; the read-back flushes the write. */
static void mpic_mask_tm(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];

	mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
	mpic_tm_read(src);
}

865 866
/*
 * Set the destination CPU(s) of an interrupt source.  Controllers
 * flagged MPIC_SINGLE_DEST_CPU can route to only one CPU, so a single
 * target is chosen; otherwise the logical mask (restricted to online
 * CPUs, first word only) is converted to a physical-CPU bitmask.
 * Returns 0 on success.
 */
int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
		      bool force)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
		int cpuid = irq_choose_cpu(cpumask);

		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
	} else {
		u32 mask = cpumask_bits(cpumask)[0];

		mask &= cpumask_bits(cpu_online_mask)[0];

		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
			       mpic_physmask(mask));
	}

	return 0;
}

887
/*
 * Translate a Linux IRQ_TYPE_* trigger into the controller's
 * vector/priority sense+polarity bits.  Edge-both is approximated as
 * falling edge; anything unrecognized defaults to level-low.
 */
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
{
	/* Now convert sense value */
	switch(type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		return MPIC_INFO(VECPRI_SENSE_EDGE) |
		       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_EDGE_BOTH:
		return MPIC_INFO(VECPRI_SENSE_EDGE) |
		       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
	case IRQ_TYPE_LEVEL_HIGH:
		return MPIC_INFO(VECPRI_SENSE_LEVEL) |
		       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
	case IRQ_TYPE_LEVEL_LOW:
	default:
		return MPIC_INFO(VECPRI_SENSE_LEVEL) |
		       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
	}
}

908
int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
909
{
910
	struct mpic *mpic = mpic_from_irq_data(d);
911
	unsigned int src = irqd_to_hwirq(d);
912 913
	unsigned int vecpri, vold, vnew;

914
	DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
915
	    mpic, d->irq, src, flow_type);
916 917 918 919 920 921 922 923 924 925

	if (src >= mpic->irq_count)
		return -EINVAL;

	if (flow_type == IRQ_TYPE_NONE)
		if (mpic->senses && src < mpic->senses_count)
			flow_type = mpic->senses[src];
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

926
	irqd_set_trigger_type(d, flow_type);
927 928 929 930 931

	if (mpic_is_ht_interrupt(mpic, src))
		vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
			MPIC_VECPRI_SENSE_EDGE;
	else
932
		vecpri = mpic_type_to_vecpri(mpic, flow_type);
933

934 935 936
	vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
	vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
			MPIC_INFO(VECPRI_SENSE_MASK));
937 938
	vnew |= vecpri;
	if (vold != vnew)
939
		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
940

941
	return IRQ_SET_MASK_OK_NOCOPY;;
942 943
}

944 945 946
/* Program the hardware vector field of a source's vector/priority
 * register, leaving priority/sense/mask bits untouched.  Out-of-range
 * sources are silently ignored. */
void mpic_set_vector(unsigned int virq, unsigned int vector)
{
	struct mpic *mpic = mpic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	unsigned int vecpri;

	DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
	    mpic, virq, src, vector);

	if (src >= mpic->irq_count)
		return;

	vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
	vecpri = (vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK)) | vector;
	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
}

962 963 964
/* Route a source to a single CPU by writing its destination register.
 * Out-of-range sources are silently ignored. */
void mpic_set_destination(unsigned int virq, unsigned int cpuid)
{
	struct mpic *mpic = mpic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);

	DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
	    mpic, virq, src, cpuid);

	if (src < mpic->irq_count)
		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
}

976
/* irq_chip for normal external interrupt sources */
static struct irq_chip mpic_irq_chip = {
	.irq_mask	= mpic_mask_irq,
	.irq_unmask	= mpic_unmask_irq,
	.irq_eoi	= mpic_end_irq,
	.irq_set_type	= mpic_set_irq_type,
};

#ifdef CONFIG_SMP
/* irq_chip for inter-processor interrupts (mask is a no-op) */
static struct irq_chip mpic_ipi_chip = {
	.irq_mask	= mpic_mask_ipi,
	.irq_unmask	= mpic_unmask_ipi,
	.irq_eoi	= mpic_end_ipi,
};
#endif /* CONFIG_SMP */

/* irq_chip for the global timer sources */
static struct irq_chip mpic_tm_chip = {
	.irq_mask	= mpic_mask_tm,
	.irq_unmask	= mpic_unmask_tm,
	.irq_eoi	= mpic_end_irq,
};

997
#ifdef CONFIG_MPIC_U3_HT_IRQS
998
static struct irq_chip mpic_irq_ht_chip = {
999 1000 1001 1002 1003 1004
	.irq_startup	= mpic_startup_ht_irq,
	.irq_shutdown	= mpic_shutdown_ht_irq,
	.irq_mask	= mpic_mask_irq,
	.irq_unmask	= mpic_unmask_ht_irq,
	.irq_eoi	= mpic_end_ht_irq,
	.irq_set_type	= mpic_set_irq_type,
1005
};
1006
#endif /* CONFIG_MPIC_U3_HT_IRQS */
1007

1008

1009 1010 1011
static int mpic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless mpic node is NULL */
1012
	return h->of_node == NULL || h->of_node == node;
1013 1014 1015
}

/*
 * irq_host map callback: bind a virq to hardware source @hw by
 * installing the right chip/handler (IPI, timer, HT-fixed-up, or
 * normal) and, when the controller was not reset at init
 * (MPIC_NO_RESET), lazily initializing the source's vector, priority
 * and destination.  Returns 0 on success, -EINVAL for the spurious
 * vector, protected sources, or out-of-range sources.
 */
static int mpic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct mpic *mpic = h->host_data;
	struct irq_chip *chip;

	DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);

	if (hw == mpic->spurious_vec)
		return -EINVAL;
	if (mpic->protected && test_bit(hw, mpic->protected))
		return -EINVAL;

	/* NOTE(review): the "else" below binds to the protected-check
	 * "if" above across the #ifdef — legal but easy to misread;
	 * kept byte-identical to avoid a subtle behavior change. */
#ifdef CONFIG_SMP
	else if (hw >= mpic->ipi_vecs[0]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as IPI\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_ipi,
					 handle_percpu_irq);
		return 0;
	}
#endif /* CONFIG_SMP */

	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as timer\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_tm,
					 handle_fasteoi_irq);
		return 0;
	}

	if (hw >= mpic->irq_count)
		return -EINVAL;

	/* Keep MSI-capable hwirqs out of the MSI allocator's pool */
	mpic_msi_reserve_hwirq(mpic, hw);

	/* Default chip */
	chip = &mpic->hc_irq;

#ifdef CONFIG_MPIC_U3_HT_IRQS
	/* Check for HT interrupts, override vecpri */
	if (mpic_is_ht_interrupt(mpic, hw))
		chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

	DBG("mpic: mapping to irq chip @%p\n", chip);

	irq_set_chip_data(virq, mpic);
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	/* If the MPIC was reset, then all vectors have already been
	 * initialized.  Otherwise, a per source lazy initialization
	 * is done here.
	 */
	if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
		mpic_set_vector(virq, hw);
		mpic_set_destination(virq, mpic_processor_id(mpic));
		mpic_irq_set_priority(virq, 8);
	}

	return 0;
}

static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
1086
			   const u32 *intspec, unsigned int intsize,
1087 1088 1089
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
1090
	struct mpic *mpic = h->host_data;
1091 1092 1093 1094 1095 1096 1097 1098
	static unsigned char map_mpic_senses[4] = {
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
	};

	*out_hwirq = intspec[0];
1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
	if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
		/*
		 * Freescale MPIC with extended intspec:
		 * First two cells are as usual.  Third specifies
		 * an "interrupt type".  Fourth is type-specific data.
		 *
		 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
		 */
		switch (intspec[2]) {
		case 0:
		case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
			break;
		case 2:
			if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
				return -EINVAL;

			*out_hwirq = mpic->ipi_vecs[intspec[0]];
			break;
		case 3:
			if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
				return -EINVAL;

			*out_hwirq = mpic->timer_vecs[intspec[0]];
			break;
		default:
			pr_debug("%s: unknown irq type %u\n",
				 __func__, intspec[2]);
			return -EINVAL;
		}

		*out_flags = map_mpic_senses[intspec[1] & 3];
	} else if (intsize > 1) {
1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146
		u32 mask = 0x3;

		/* Apple invented a new race of encoding on machines with
		 * an HT APIC. They encode, among others, the index within
		 * the HT APIC. We don't care about it here since thankfully,
		 * it appears that they have the APIC already properly
		 * configured, and thus our current fixup code that reads the
		 * APIC config works fine. However, we still need to mask out
		 * bits in the specifier to make sure we only get bit 0 which
		 * is the level/edge bit (the only sense bit exposed by Apple),
		 * as their bit 1 means something else.
		 */
		if (machine_is(powermac))
			mask = 0x1;
		*out_flags = map_mpic_senses[intspec[1] & mask];
	} else
1147 1148
		*out_flags = IRQ_TYPE_NONE;

1149 1150 1151
	DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
	    intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);

1152 1153 1154 1155 1156 1157 1158 1159 1160
	return 0;
}

/* irq_host callbacks wiring the MPIC into the powerpc IRQ remapping layer */
static struct irq_host_ops mpic_host_ops = {
	.match = mpic_host_match,
	.map = mpic_host_map,
	.xlate = mpic_host_xlate,
};

1161 1162 1163 1164 1165
static int mpic_reset_prohibited(struct device_node *node)
{
	return node && of_get_property(node, "pic-no-reset", NULL);
}

/*
 * Exported functions
 */

/*
 * Allocate and minimally set up an MPIC instance.
 *
 * @node:      device-tree node of the controller; mandatory when
 *             @phys_addr is zero
 * @phys_addr: physical address of the register block, or 0 to obtain it
 *             from the device-tree "reg" property (or use DCR access)
 * @flags:     MPIC_* behaviour flags
 * @isu_size:  number of sources per external ISU, or 0 when sources live
 *             in the main register block and are counted from FRR
 * @irq_count: number of sources, or 0 to use the hardware-reported count
 * @name:      name used for the irq chips and kernel messages
 *
 * Returns the new mpic on success, NULL on allocation failure.
 */
struct mpic * __init mpic_alloc(struct device_node *node,
				phys_addr_t phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_count,
				const char *name)
{
	struct mpic	*mpic;
	u32		greg_feature;
	const char	*vers;
	int		i;
	int		intvec_top;
	u64		paddr = phys_addr;

	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
	if (mpic == NULL)
		return NULL;

	mpic->name = name;

	/* Clone the template irq chips so each instance can carry its own
	 * name and (for the primary only) an affinity setter.
	 */
	mpic->hc_irq = mpic_irq_chip;
	mpic->hc_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
	mpic->hc_ht_irq = mpic_irq_ht_chip;
	mpic->hc_ht_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

#ifdef CONFIG_SMP
	mpic->hc_ipi = mpic_ipi_chip;
	mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */

	mpic->hc_tm = mpic_tm_chip;
	mpic->hc_tm.name = name;

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_count = irq_count;
	mpic->num_sources = 0; /* so far */

	if (flags & MPIC_LARGE_VECTORS)
		intvec_top = 2047;
	else
		intvec_top = 255;

	/* Reserve internal vectors (8 timers, 4 IPIs, spurious) at the top
	 * of the vector space.
	 */
	mpic->timer_vecs[0] = intvec_top - 12;
	mpic->timer_vecs[1] = intvec_top - 11;
	mpic->timer_vecs[2] = intvec_top - 10;
	mpic->timer_vecs[3] = intvec_top - 9;
	mpic->timer_vecs[4] = intvec_top - 8;
	mpic->timer_vecs[5] = intvec_top - 7;
	mpic->timer_vecs[6] = intvec_top - 6;
	mpic->timer_vecs[7] = intvec_top - 5;
	mpic->ipi_vecs[0]   = intvec_top - 4;
	mpic->ipi_vecs[1]   = intvec_top - 3;
	mpic->ipi_vecs[2]   = intvec_top - 2;
	mpic->ipi_vecs[3]   = intvec_top - 1;
	mpic->spurious_vec  = intvec_top;

	/* Check for "big-endian" in device-tree */
	if (node && of_get_property(node, "big-endian", NULL) != NULL)
		mpic->flags |= MPIC_BIG_ENDIAN;
	if (node && of_device_is_compatible(node, "fsl,mpic"))
		mpic->flags |= MPIC_FSL;

	/* Look for protected sources */
	if (node) {
		int psize;
		unsigned int bits, mapsize;
		const u32 *psrc =
			of_get_property(node, "protected-sources", &psize);
		if (psrc) {
			/* Build a bitmap of sources we must not touch */
			psize /= 4;
			bits = intvec_top + 1;
			mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
			mpic->protected = kzalloc(mapsize, GFP_KERNEL);
			BUG_ON(mpic->protected == NULL);
			for (i = 0; i < psize; i++) {
				if (psrc[i] > intvec_top)
					continue;
				__set_bit(psrc[i], mpic->protected);
			}
		}
	}

#ifdef CONFIG_MPIC_WEIRD
	mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
#endif

	/* default register type */
	mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
		mpic_access_mmio_be : mpic_access_mmio_le;

	/* If no physical address is passed in, a device-node is mandatory */
	BUG_ON(paddr == 0 && node == NULL);

	/* If no physical address passed in, check if it's dcr based */
	if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
#ifdef CONFIG_PPC_DCR
		mpic->flags |= MPIC_USES_DCR;
		mpic->reg_type = mpic_access_dcr;
#else
		BUG();
#endif /* CONFIG_PPC_DCR */
	}

	/* If the MPIC is not DCR based, and no physical address was passed
	 * in, try to obtain one
	 */
	if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
		const u32 *reg = of_get_property(node, "reg", NULL);
		BUG_ON(reg == NULL);
		paddr = of_translate_address(node, reg);
		BUG_ON(paddr == OF_BAD_ADDR);
	}

	/* Map the global registers */
	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
	mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);

	/* Reset */

	/* When using a device-node, reset requests are only honored if the MPIC
	 * is allowed to reset.
	 */
	if (mpic_reset_prohibited(node))
		mpic->flags |= MPIC_NO_RESET;

	if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
		printk(KERN_DEBUG "mpic: Resetting\n");
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_RESET);
		/* Busy-wait for the controller to clear the reset bit */
		while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* CoreInt */
	if (flags & MPIC_ENABLE_COREINT)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_COREINT);

	if (flags & MPIC_ENABLE_MCK)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_MCK);

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
	mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0) {
		if (flags & MPIC_BROKEN_FRR_NIRQS)
			mpic->num_sources = mpic->irq_count;
		else
			mpic->num_sources =
				((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
	}

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
			 MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
			 0x1000);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic_map(mpic, node, paddr, &mpic->isus[0],
			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
	}
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       isu_size ? isu_size : mpic->num_sources,
				       &mpic_host_ops,
				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
	if (mpic->irqhost == NULL) {
		/* Don't leak the allocations on failure.  The register
		 * mappings are intentionally not undone; this only happens
		 * at early boot and the machine is unusable anyway.
		 */
		kfree(mpic->protected);
		kfree(mpic);
		return NULL;
	}

	mpic->irqhost->host_data = mpic;

	/* Display version */
	switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
	       " max %d CPUs\n",
	       name, vers, (unsigned long long)paddr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);

	/* Link into the global list of MPICs */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY) {
		mpic_primary = mpic;
		irq_set_default_host(mpic->irqhost);
	}

	return mpic;
}

/* Register an external Interrupt Source Unit with the MPIC and grow the
 * total source count to cover it.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    phys_addr_t paddr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic_map(mpic, mpic->irqhost->of_node,
		 paddr, &mpic->isus[isu_num], 0,
		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);

	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

1411 1412 1413 1414 1415 1416
void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
{
	mpic->senses = senses;
	mpic->senses_count = count;
}

1417 1418 1419
void __init mpic_init(struct mpic *mpic)
{
	int i;
1420
	int cpu;
1421 1422 1423 1424 1425 1426

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
1427
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1428

1429
	/* Initialize timers to our reserved vectors and mask them for now */
1430 1431
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
1432
			   i * MPIC_INFO(TIMER_STRIDE) +
1433 1434
			   MPIC_INFO(TIMER_DESTINATION),
			   1 << hard_smp_processor_id());
1435
		mpic_write(mpic->tmregs,
1436 1437
			   i * MPIC_INFO(TIMER_STRIDE) +
			   MPIC_INFO(TIMER_VECTOR_PRI),
1438
			   MPIC_VECPRI_MASK |
1439
			   (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1440
			   (mpic->timer_vecs[0] + i));
1441 1442 1443 1444 1445 1446 1447 1448
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
1449
			       (mpic->ipi_vecs[0] + i));
1450 1451 1452 1453 1454 1455
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

1456
	/* Do the HT PIC fixups on U3 broken mpic */
1457
	DBG("MPIC flags: %x\n", mpic->flags);
1458
	if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) {
1459
		mpic_scan_ht_pics(mpic);
1460 1461
		mpic_u3msi_init(mpic);
	}
1462

1463 1464
	mpic_pasemi_msi_init(mpic);

1465
	cpu = mpic_processor_id(mpic);
1466

1467 1468 1469 1470 1471
	if (!(mpic->flags & MPIC_NO_RESET)) {
		for (i = 0; i < mpic->num_sources; i++) {
			/* start with vector = source number, and masked */
			u32 vecpri = MPIC_VECPRI_MASK | i |
				(8 << MPIC_VECPRI_PRIORITY_SHIFT);
1472
		
1473 1474 1475 1476 1477 1478 1479
			/* check if protected */
			if (mpic->protected && test_bit(i, mpic->protected))
				continue;
			/* init hw */
			mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
		}
1480 1481
	}
	
1482 1483
	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
1484

1485 1486 1487 1488 1489
	/* Disable 8259 passthrough, if supported */
	if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_8259_PTHROU_DIS);
1490

1491 1492 1493 1494 1495
	if (mpic->flags & MPIC_NO_BIAS)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			| MPIC_GREG_GCONF_NO_BIAS);

1496
	/* Set current processor priority to 0 */
1497
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1498 1499 1500

#ifdef CONFIG_PM
	/* allocate memory to save mpic state */
1501 1502
	mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
				  GFP_KERNEL);
1503 1504
	BUG_ON(mpic->save_data == NULL);
#endif
1505 1506
}

1507 1508 1509 1510 1511 1512 1513 1514 1515
void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}
1516

1517 1518
void __init mpic_set_serial_int(struct mpic *mpic, int enable)
{
1519
	unsigned long flags;
1520 1521
	u32 v;

1522
	raw_spin_lock_irqsave(&mpic_lock, flags);
1523 1524 1525 1526 1527 1528
	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	if (enable)
		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
	else
		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1529
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
1530
}
1531 1532 1533

void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
1534
	struct mpic *mpic = mpic_find(irq);
1535
	unsigned int src = virq_to_hw(irq);
1536 1537 1538
	unsigned long flags;
	u32 reg;

1539 1540 1541
	if (!mpic)
		return;

1542
	raw_spin_lock_irqsave(&mpic_lock, flags);
1543
	if (mpic_is_ipi(mpic, irq)) {
1544
		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
1545
			~MPIC_VECPRI_PRIORITY_MASK;
1546
		mpic_ipi_write(src - mpic->ipi_vecs[0],
1547
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1548 1549 1550 1551 1552
	} else if (mpic_is_tm(mpic, irq)) {
		reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_tm_write(src - mpic->timer_vecs[0],
			      reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1553
	} else {
1554
		reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1555
			& ~MPIC_VECPRI_PRIORITY_MASK;
1556
		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
1557 1558
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
1559
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
}

/* Per-CPU bring-up: add this CPU to the destination mask of every source
 * (when distributing IRQs) and lower its task priority to 0.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

1598
	return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
1599 1600 1601 1602 1603 1604 1605
}

void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
1606
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618
}

void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1619
	raw_spin_lock_irqsave(&mpic_lock, flags);
1620 1621 1622

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
1623 1624
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
1625 1626

	/* Set current processor priority to max */
1627
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1628 1629 1630 1631
	/* We need to EOI the IPI since not all platforms reset the MPIC
	 * on boot and new interrupts wouldn't get delivered otherwise.
	 */
	mpic_eoi(mpic);
1632

1633
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
1634 1635 1636
}


1637
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
1638
{
1639
	u32 src;
1640

1641
	src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
1642
#ifdef DEBUG_LOW
1643
	DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
1644
#endif
1645 1646 1647
	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
1648
		return NO_IRQ;
1649
	}
1650 1651 1652 1653 1654 1655 1656 1657
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		mpic_eoi(mpic);
		return NO_IRQ;
	}

1658
	return irq_linear_revmap(mpic->irqhost, src);
1659 1660
}

1661 1662 1663 1664 1665
unsigned int mpic_get_one_irq(struct mpic *mpic)
{
	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
}

O
Olaf Hering 已提交
1666
unsigned int mpic_get_irq(void)
1667 1668 1669 1670 1671
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

O
Olaf Hering 已提交
1672
	return mpic_get_one_irq(mpic);
1673 1674
}

1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
unsigned int mpic_get_coreint_irq(void)
{
#ifdef CONFIG_BOOKE
	struct mpic *mpic = mpic_primary;
	u32 src;

	BUG_ON(mpic == NULL);

	src = mfspr(SPRN_EPR);

	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
#else
	return NO_IRQ;
#endif
}

1703 1704 1705 1706 1707 1708 1709 1710
unsigned int mpic_get_mcirq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
}

#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;
1716
	int i;
1717 1718
	BUG_ON(mpic == NULL);

1719
	printk(KERN_INFO "mpic: requesting IPIs...\n");
1720 1721 1722

	for (i = 0; i < 4; i++) {
		unsigned int vipi = irq_create_mapping(mpic->irqhost,
1723
						       mpic->ipi_vecs[0] + i);
1724
		if (vipi == NO_IRQ) {
1725 1726
			printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
			continue;
1727
		}
1728
		smp_request_message_ipi(vipi, i);
1729
	}
1730
}
1731

1732
void smp_mpic_message_pass(int cpu, int msg)
1733 1734
{
	struct mpic *mpic = mpic_primary;
1735
	u32 physmask;
1736 1737 1738

	BUG_ON(mpic == NULL);

1739 1740 1741 1742 1743 1744
	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
1745 1746 1747 1748 1749 1750 1751 1752 1753

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
#endif

	physmask = 1 << get_hard_smp_processor_id(cpu);

	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1754
}

/* SMP probe hook: count possible CPUs and request IPIs when more than
 * one is present.  Returns the CPU count.
 */
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpumask_weight(cpu_possible_mask);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

/* SMP per-CPU setup hook; @cpu is implicit (runs on the target CPU) */
void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793

void mpic_reset_core(int cpu)
{
	struct mpic *mpic = mpic_primary;
	u32 pir;
	int cpuid = get_hard_smp_processor_id(cpu);

	/* Set target bit for core reset */
	pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
	pir |= (1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));

	/* Restore target bit after reset complete */
	pir &= ~(1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PM
1797
static void mpic_suspend_one(struct mpic *mpic)
1798 1799 1800 1801 1802 1803 1804 1805 1806
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic->save_data[i].vecprio =
			mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
		mpic->save_data[i].dest =
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
	}
1807 1808 1809 1810 1811 1812 1813 1814 1815 1816
}

/* syscore suspend callback: snapshot the state of every registered MPIC */
static int mpic_suspend(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_suspend_one(mpic);
		mpic = mpic->next;
	}

	return 0;
}

1821
static void mpic_resume_one(struct mpic *mpic)
1822 1823 1824 1825 1826 1827 1828 1829 1830 1831
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
			       mpic->save_data[i].vecprio);
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			       mpic->save_data[i].dest);

#ifdef CONFIG_MPIC_U3_HT_IRQS
1832
	if (mpic->fixups) {
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848
		struct mpic_irq_fixup *fixup = &mpic->fixups[i];

		if (fixup->base) {
			/* we use the lowest bit in an inverted meaning */
			if ((mpic->save_data[i].fixup_data & 1) == 0)
				continue;

			/* Enable and configure */
			writeb(0x10 + 2 * fixup->index, fixup->base + 2);

			writel(mpic->save_data[i].fixup_data & ~1,
			       fixup->base + 4);
		}
	}
#endif
	} /* end for loop */
1849
}
1850

1851 1852 1853 1854 1855 1856 1857 1858
static void mpic_resume(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_resume_one(mpic);
		mpic = mpic->next;
	}
1859 1860
}

1861
static struct syscore_ops mpic_syscore_ops = {
1862 1863 1864 1865 1866 1867
	.resume = mpic_resume,
	.suspend = mpic_suspend,
};

static int mpic_init_sys(void)
{
1868 1869
	register_syscore_ops(&mpic_syscore_ops);
	return 0;
1870 1871 1872
}

device_initcall(mpic_init_sys);
#endif