irq.c 23.8 KB
Newer Older
1
/* irq.c: UltraSparc IRQ handling/init/registry.
L
Linus Torvalds 已提交
2
 *
3
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
L
Linus Torvalds 已提交
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
22
#include <linux/bootmem.h>
23
#include <linux/irq.h>
24
#include <linux/msi.h>
L
Linus Torvalds 已提交
25 26 27 28 29 30

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
31
#include <asm/io.h>
L
Linus Torvalds 已提交
32 33 34 35
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
36
#include <asm/prom.h>
L
Linus Torvalds 已提交
37 38 39 40 41 42
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
43
#include <asm/auxio.h>
44
#include <asm/head.h>
45
#include <asm/hypervisor.h>
L
Linus Torvalds 已提交
46 47 48 49 50 51 52 53 54 55 56 57

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
58 59 60
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
L
Linus Torvalds 已提交
61
 */
62 63 64 65 66 67
struct ino_bucket {
	/* Next handler in per-CPU IRQ worklist.  We know that
	 * bucket pointers have the high 32-bits clear, so to
	 * save space we only store the bits we need.
	 */
/*0x00*/unsigned int irq_chain;

	/* Virtual interrupt number assigned to this INO.  */
/*0x04*/unsigned int virt_irq;
};

/* One bucket per possible hardware interrupt number (INO). */
#define NUM_IVECS	(IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* Convert between a bucket pointer, its INO index, and the 32-bit
 * "real irq" cookie (the low half of the bucket's address — valid
 * because bucket addresses have the high 32 bits clear, see above).
 */
#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))

L
Linus Torvalds 已提交
81 82 83 84 85 86 87
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
88
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
L
Linus Torvalds 已提交
89

90 91 92 93 94 95 96 97
static unsigned int virt_to_real_irq_table[NR_IRQS];

static unsigned char virt_irq_alloc(unsigned int real_irq)
{
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

98 99 100 101
	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_to_real_irq_table[ent])
			break;
	}
102 103 104 105 106 107 108 109 110 111
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		return 0;
	}

	virt_to_real_irq_table[ent] = real_irq;

	return ent;
}

112
#ifdef CONFIG_PCI_MSI
113
static void virt_irq_free(unsigned int virt_irq)
114
{
115
	unsigned int real_irq;
116

117 118 119 120 121 122 123
	if (virt_irq >= NR_IRQS)
		return;

	real_irq = virt_to_real_irq_table[virt_irq];
	virt_to_real_irq_table[virt_irq] = 0;

	__bucket(real_irq)->virt_irq = 0;
124
}
125
#endif
126 127 128 129 130 131

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq];
}

L
Linus Torvalds 已提交
132
/*
 * /proc/interrupts printing:
 */

/* seq_file ->show callback for /proc/interrupts; *v is the irq index. */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Print the header row (one column per online cpu) only once. */
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* no handlers registered: print nothing */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		/* Remaining handlers sharing this irq line. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208
/* Compute the IMAP target-ID field routing an interrupt to @cpuid.
 * The encoding depends on the system bus: Starfire UPA needs a
 * firmware translation, JBUS (Jalapeno/Serrano) uses a flat id, and
 * Safari splits the cpu number into agent/node fields; everything
 * else is plain UPA.
 *
 * Fix: dropped a stray second semicolon after the Safari mask
 * expression (harmless empty statement, but noise).
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		/* Starfire: firmware translates the logical cpu id. */
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			/* Distinguish JBUS from Safari by the cpu's
			 * %ver manufacturer/implementation field.
			 */
			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari: agent id is the low 5 bits,
				 * node id the next 5.
				 */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			/* Plain UPA. */
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

209 210 211
/* Per-virtual-irq chip data: sun4u hardware register addresses plus
 * an optional bus-specific pre-handler invoked from the ->ack hook.
 * On sun4v both registers are poisoned with ~0UL since IMAP/ICLR are
 * managed through hypervisor calls instead.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*pre_handler_arg1;
	void		*pre_handler_arg2;
};
L
Linus Torvalds 已提交
217

218
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
L
Linus Torvalds 已提交
219
{
220
	unsigned int real_irq = virt_to_real_irq(virt_irq);
221
	struct ino_bucket *bucket = NULL;
L
Linus Torvalds 已提交
222

223 224
	if (likely(real_irq))
		bucket = __bucket(real_irq);
225

226
	return bucket;
L
Linus Torvalds 已提交
227 228
}

229 230
#ifdef CONFIG_SMP
/* Pick the cpu to route an interrupt to.  For the default (all-cpus)
 * affinity we round-robin over the online cpus; otherwise we take the
 * first online cpu in the affinity mask, falling back to round-robin
 * when the mask contains no online cpu.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Ensure the rover points at an online cpu right now. */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		/* Advance the rover to the next online cpu for next time. */
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		/* NOTE: jumps back into the round-robin branch above. */
		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP build: everything goes to the only processor. */
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
L
Linus Torvalds 已提交
274

275
static void sun4u_irq_enable(unsigned int virt_irq)
276
{
277
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
278

279
	if (likely(data)) {
280
		unsigned long cpuid, imap, val;
281
		unsigned int tid;
282

283 284
		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;
285

286
		tid = sun4u_compute_tid(imap, cpuid);
287

288 289 290 291 292
		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
293 294 295
	}
}

296
static void sun4u_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
297
{
298
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
L
Linus Torvalds 已提交
299

300 301
	if (likely(data)) {
		unsigned long imap = data->imap;
302
		u32 tmp = upa_readq(imap);
L
Linus Torvalds 已提交
303

304
		tmp &= ~IMAP_VALID;
305
		upa_writeq(tmp, imap);
306 307 308
	}
}

309
static void sun4u_irq_end(unsigned int virt_irq)
310
{
311
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
312 313 314 315
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
316

317
	if (likely(data))
318
		upa_writeq(ICLR_IDLE, data->iclr);
319 320
}

321
static void sun4v_irq_enable(unsigned int virt_irq)
322
{
323 324
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
325

326 327 328
	if (likely(bucket)) {
		unsigned long cpuid;
		int err;
329

330
		cpuid = irq_choose_cpu(virt_irq);
331

332 333 334 335
		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
336 337 338 339
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
340 341 342 343
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
344 345 346
	}
}

347
static void sun4v_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
348
{
349 350
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
L
Linus Torvalds 已提交
351

352 353
	if (likely(bucket)) {
		int err;
L
Linus Torvalds 已提交
354

355 356 357 358
		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): "
			       "err(%d)\n", ino, err);
L
Linus Torvalds 已提交
359
	}
360
}
L
Linus Torvalds 已提交
361

362 363 364 365 366 367 368 369 370 371 372 373 374 375
#ifdef CONFIG_PCI_MSI
/* MSI enable: bring up the underlying sun4v interrupt first, then
 * unmask the MSI at the PCI level.
 */
static void sun4v_msi_enable(unsigned int virt_irq)
{
	sun4v_irq_enable(virt_irq);
	unmask_msi_irq(virt_irq);
}

/* MSI disable: mask at the PCI level first, then tear down the
 * underlying sun4v interrupt (exact reverse of enable).
 */
static void sun4v_msi_disable(unsigned int virt_irq)
{
	mask_msi_irq(virt_irq);
	sun4v_irq_disable(virt_irq);
}
#endif

376 377 378 379
static void sun4v_irq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
380 381 382 383
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
L
Linus Torvalds 已提交
384

385 386
	if (likely(bucket)) {
		int err;
L
Linus Torvalds 已提交
387

388 389 390 391
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
L
Linus Torvalds 已提交
392 393 394
	}
}

395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
static void sun4v_virq_enable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid, dev_handle, dev_ino;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
			       "err(%d)\n",
			       dev_handle, dev_ino, cpuid, err);
		err = sun4v_vintr_set_state(dev_handle, dev_ino,
D
David S. Miller 已提交
415 416 417 418 419 420
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
				"HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440
					    HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_ENABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

D
David S. Miller 已提交
441
		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
442 443 444 445 446 447 448 449 450 451 452 453
					    HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_DISABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
454 455 456 457
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_state(dev_handle, dev_ino,
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
				"HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

475
static void run_pre_handler(unsigned int virt_irq)
L
Linus Torvalds 已提交
476
{
477
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
478
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
L
Linus Torvalds 已提交
479

480 481 482 483
	if (likely(data->pre_handler)) {
		data->pre_handler(__irq_ino(__irq(bucket)),
				  data->pre_handler_arg1,
				  data->pre_handler_arg2);
L
Linus Torvalds 已提交
484
	}
485 486
}

487
/* irq_chip instances.  The plain variants are installed when an irq
 * is built; irq_install_pre_handler() swaps in the matching "+ack"
 * variant so run_pre_handler() fires from the ->ack hook.
 */
static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.end		= sun4u_irq_end,
};

static struct irq_chip sun4u_irq_ack = {
	.typename	= "sun4u+ack",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4u_irq_end,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.end		= sun4v_irq_end,
};

static struct irq_chip sun4v_irq_ack = {
	.typename	= "sun4v+ack",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_irq_end,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip sun4v_msi = {
	.typename	= "sun4v+msi",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.enable		= sun4v_msi_enable,
	.disable	= sun4v_msi_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_irq_end,
};
#endif

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.end		= sun4v_virq_end,
};

static struct irq_chip sun4v_virq_ack = {
	.typename	= "vsun4v+ack",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_virq_end,
};

544 545 546 547
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
548 549
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_chip *chip;
550

551 552 553
	data->pre_handler = func;
	data->pre_handler_arg1 = arg1;
	data->pre_handler_arg2 = arg2;
L
Linus Torvalds 已提交
554

555 556
	chip = get_irq_chip(virt_irq);
	if (chip == &sun4u_irq_ack ||
557 558
	    chip == &sun4v_irq_ack ||
	    chip == &sun4v_virq_ack
559 560 561 562
#ifdef CONFIG_PCI_MSI
	    || chip == &sun4v_msi
#endif
	    )
563 564
		return;

565
	chip = (chip == &sun4u_irq ?
566 567 568
		&sun4u_irq_ack :
		(chip == &sun4v_irq ?
		 &sun4v_irq_ack : &sun4v_virq_ack));
569
	set_irq_chip(virt_irq, chip);
570
}
L
Linus Torvalds 已提交
571

572 573 574 575 576
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	int ino;
L
Linus Torvalds 已提交
577

578
	BUG_ON(tlb_type == hypervisor);
579

580
	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
581 582 583
	bucket = &ivector_table[ino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
584
		set_irq_chip(bucket->virt_irq, &sun4u_irq);
585
	}
L
Linus Torvalds 已提交
586

587 588
	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
589
		goto out;
590

591 592 593 594
	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
L
Linus Torvalds 已提交
595
	}
596
	set_irq_chip_data(bucket->virt_irq, data);
L
Linus Torvalds 已提交
597

598 599
	data->imap  = imap;
	data->iclr  = iclr;
L
Linus Torvalds 已提交
600

601 602 603
out:
	return bucket->virt_irq;
}
L
Linus Torvalds 已提交
604

605 606
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
L
Linus Torvalds 已提交
607
{
608
	struct ino_bucket *bucket;
609
	struct irq_handler_data *data;
610

611
	BUG_ON(tlb_type != hypervisor);
L
Linus Torvalds 已提交
612

613 614 615
	bucket = &ivector_table[sysino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
616
		set_irq_chip(bucket->virt_irq, chip);
L
Linus Torvalds 已提交
617 618
	}

619 620
	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
L
Linus Torvalds 已提交
621 622
		goto out;

623 624 625 626 627
	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
628
	set_irq_chip_data(bucket->virt_irq, data);
L
Linus Torvalds 已提交
629

630 631 632 633 634 635
	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;
L
Linus Torvalds 已提交
636

637 638 639
out:
	return bucket->virt_irq;
}
L
Linus Torvalds 已提交
640

641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666
/* Build a virtual irq for a regular sun4v device interrupt. */
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

/* Build a virtual irq for a cookie-based sun4v interrupt.  The cookie
 * handed to the hypervisor is the sysino itself (devhandle|devino).
 */
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino, hv_err;

	BUG_ON(devhandle & ~IMAP_IGN);
	BUG_ON(devino & ~IMAP_INO);

	sysino = devhandle | devino;

	hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return sun4v_build_common(sysino, &sun4v_virq);
}

667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716
#ifdef CONFIG_PCI_MSI
/* Claim a free MSI devino in [msi_start, msi_end) for @devhandle and
 * bind it to a fresh virtual irq (stored through @virt_irq_p).
 * Returns the devino used, or 0 when the range is exhausted.
 */
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
			     unsigned int msi_start, unsigned int msi_end)
{
	struct irq_handler_data *data;
	struct ino_bucket *bucket = NULL;
	unsigned long sysino;
	unsigned int devino;

	BUG_ON(tlb_type != hypervisor);

	/* Find a free devino in the given range.  */
	for (devino = msi_start; devino < msi_end; devino++) {
		sysino = sun4v_devino_to_sysino(devhandle, devino);
		bucket = &ivector_table[sysino];
		if (!bucket->virt_irq)
			break;
	}
	if (devino >= msi_end)
		return 0;

	bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	*virt_irq_p = bucket->virt_irq;
	set_irq_chip(bucket->virt_irq, &sun4v_msi);

	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
		return devino;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(bucket->virt_irq, data);

	/* IMAP/ICLR are hypervisor-managed on sun4v; poison them. */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	return devino;
}

/* Release the virtual irq backing an MSI. */
void sun4v_destroy_msi(unsigned int virt_irq)
{
	virt_irq_free(virt_irq);
}
#endif

717 718 719 720
void ack_bad_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = 0xdeadbeef;
721

722 723
	if (bucket)
		ino = bucket - &ivector_table[0];
724

725 726
	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
L
Linus Torvalds 已提交
727 728 729 730
}

/* Top-level device interrupt entry, called with softint @irq raised.
 * Drains the per-cpu worklist that the vectored trap handlers
 * (entry.S / sun4v_ivec.S) built, dispatching each queued bucket's
 * virtual irq through the generic irq layer.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bucket;
	struct pt_regs *old_regs;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Sliiiick... */
	/* Atomically snapshot and empty this cpu's worklist; the IVEC
	 * handler may keep appending new buckets concurrently.
	 */
	bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
	while (bucket) {
		struct ino_bucket *next = __bucket(bucket->irq_chain);

		/* Unlink first so the bucket can be re-queued while we
		 * run its handlers.
		 */
		bucket->irq_chain = 0;
		__do_IRQ(bucket->virt_irq);

		bucket = next;
	}

	irq_exit();
	set_irq_regs(old_regs);
}

754 755 756 757 758 759
/* Register layout of the PROM's "counter-timer" node. */
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

/* Mapped PROM timer registers, or NULL when the node is absent. */
static struct sun5_timer *prom_timers;
/* Limit registers saved by kill_prom_timer() before zeroing them. */
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
766
	struct device_node *dp;
767
	const unsigned int *addr;
L
Linus Torvalds 已提交
768 769

	/* PROM timer node hangs out in the top level of device siblings... */
770 771 772 773 774 775 776
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}
L
Linus Torvalds 已提交
777 778 779 780

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
781
	if (!dp) {
L
Linus Torvalds 已提交
782 783 784 785 786
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
787 788
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
L
Linus Torvalds 已提交
789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Quiesce the PROM timer: save its limit registers for later, zero
 * them so it stops ticking, and drain any interrupt packet it has
 * already sent.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

827
	trap_block[cpu].irq_worklist = 0;
L
Linus Torvalds 已提交
828 829
}

830 831 832 833 834 835 836 837 838 839 840 841
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and it's kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	/* Queue entries are 64 bytes; qmask is the queue byte mask. */
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status = sun4v_cpu_qconf(type, paddr, num_entries);

	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

854
/* Hand this cpu's four mondo/error queues to the hypervisor.  The
 * calling-context warning above register_one_mondo() applies here
 * too.
 */
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

868
/* Allocate one mondo queue of (qmask + 1) bytes and store its
 * physical address through @pa_ptr.  Halts via PROM on failure.
 */
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	void *p;

	if (use_bootmem) {
		/* Early boot: the page allocator is not up yet. */
		p = __alloc_bootmem_low(size, size, 0);
	} else {
		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);

		p = page ? page_address(page) : NULL;
	}

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

890
/* Allocate one kernel buffer of (qmask + 1) bytes mirroring an error
 * queue, storing its physical address through @pa_ptr.  Halts via
 * PROM on failure.
 */
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	void *p;

	if (use_bootmem) {
		/* Early boot: the page allocator is not up yet. */
		p = __alloc_bootmem_low(size, size, 0);
	} else {
		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);

		p = page ? page_address(page) : NULL;
	}

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

912
/* Set up the page used for sending cpu mondos: a 64-byte mondo block
 * followed by the target cpu list.  SMP only.
 */
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
	void *page;

	/* The cpu list must fit after the 64-byte mondo block. */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = use_bootmem ?
		alloc_bootmem_low_pages(PAGE_SIZE) :
		(void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

934
/* Allocate and register the mondo and error queues for this cpu.  */
/* @use_bootmem: allocate from bootmem (early boot) rather than the
 *               page allocator.
 * @alloc: allocate the queues, error kbufs and send-mondo block.
 * @load:  register the queues with the hypervisor; only legal when
 *         executing on @cpu itself.
 */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
	struct trap_per_cpu *tb = &trap_block[cpu];

	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);

		init_cpu_send_mondo_info(tb, use_bootmem);
	}

	if (load) {
		/* Queues must be registered from the owning cpu. */
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}
		sun4v_register_mondo_queues(cpu);
	}
}

960 961 962 963
/* Placeholder action so irq 0 shows up as "timer". */
static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}