/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
22
#include <linux/bootmem.h>
23
#include <linux/irq.h>
L
Linus Torvalds 已提交
24 25 26 27 28 29

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
30
#include <asm/io.h>
L
Linus Torvalds 已提交
31 32 33 34
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
35
#include <asm/prom.h>
L
Linus Torvalds 已提交
36 37 38 39 40 41
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
42
#include <asm/auxio.h>
43
#include <asm/head.h>
44
#include <asm/hypervisor.h>
45
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
46 47 48 49 50 51 52 53 54

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
59
/* One bucket per hardware INO.  The trap-time assembler chains pending
 * buckets through __irq_chain_pa, so the field offsets here are ABI for
 * entry.S / sun4v_ivec.S (see comment above).
 */
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;	/* PA of next pending bucket, 0 if tail */

	/* Virtual interrupt number assigned to this INO.  */
/*0x08*/unsigned int __virt_irq;
/*0x0c*/unsigned int __pad;		/* pad to 16 bytes */
};

#define NUM_IVECS	(IMAP_INR + 1)
/* Bucket table, indexed by INO; allocated in init_IRQ().  */
struct ino_bucket *ivector_table;
/* Physical address of ivector_table, for bypass (physical) accesses.  */
unsigned long ivector_table_pa;
L
Linus Torvalds 已提交
70

71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	/* Physical (ASI_PHYS_USE_EC) load of bucket->__irq_chain_pa.  */
	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Zero bucket->__irq_chain_pa via a physical bypass store, unlinking
 * the bucket from the pending chain.
 */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

/* Physical bypass load of bucket->__virt_irq (0 means unassigned).  */
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Physical bypass store of virt_irq into bucket->__virt_irq.  */
static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

125
/* Address of the per-cpu pending-IVEC work list head (a physical addr).  */
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
L
Linus Torvalds 已提交
126

127 128 129
/* Map from virtual IRQ number to its (dev_handle, dev_ino) pair.
 * Protected by virt_irq_alloc_lock; entry 0 is never handed out
 * (virt_irq_alloc() starts scanning at 1).
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
133

134
unsigned char virt_irq_alloc(unsigned int dev_handle,
135
			     unsigned int dev_ino)
136
{
137
	unsigned long flags;
138 139 140 141
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

142 143
	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

144
	for (ent = 1; ent < NR_IRQS; ent++) {
145
		if (!virt_irq_table[ent].in_use)
146 147
			break;
	}
148 149
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
150 151
		ent = 0;
	} else {
152 153 154
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
155 156
	}

157
	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
158 159 160 161

	return ent;
}

162
#ifdef CONFIG_PCI_MSI
163
void virt_irq_free(unsigned int virt_irq)
164
{
165
	unsigned long flags;
166

167 168 169
	if (virt_irq >= NR_IRQS)
		return;

170 171
	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

172
	virt_irq_table[virt_irq].in_use = 0;
173

174
	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
175
}
176
#endif
177

L
Linus Torvalds 已提交
178
/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;	/* i: irq being shown, j: cpu iterator */
	struct irqaction * action;
	unsigned long flags;

	/* Header row of CPU column labels, emitted once.  */
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* nothing registered on this irq */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		/* Any shared-irq actions chained after the first.  */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
/* Compute the IMAP target-id (TID) bits that steer an interrupt to
 * @cpuid, honoring the bus-specific TID layout: Starfire-translated
 * UPA, JBUS (Jalapeno/Serrano), Safari (agent+node), or plain UPA.
 *
 * Fix: removed a stray empty statement (double semicolon) after the
 * Safari mask expression.
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		/* Starfire needs a firmware-assisted translation of the
		 * cpu id before it fits the UPA TID field.
		 */
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				/* JBUS parts use a single TID field.  */
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari encodes agent id and node id
				 * as separate 5-bit fields.
				 */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

255 256 257
/* Per-virt-irq chip data: the sun4u IMAP/ICLR register addresses
 * (set to ~0UL on sun4v, where hypervisor calls are used instead),
 * plus an optional pre-flow handler installed by bus drivers.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	/* Called with (ino, arg1, arg2) before the normal flow handler.  */
	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
L
Linus Torvalds 已提交
263

264 265
#ifdef CONFIG_SMP
/* Pick the cpu an interrupt should be targeted at.  With the default
 * "all cpus" affinity we round-robin over online cpus; otherwise we
 * take the first online cpu in the affinity mask, falling back to
 * round-robin if the mask contains no online cpu.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Make sure the rover currently points at an online cpu,
		 * then advance it past the one we hand out.
		 */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		/* NOTE: jumps back into the block above; legal C, but
		 * be careful if restructuring.
		 */
		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
L
Linus Torvalds 已提交
309

310
/* Enable a sun4u interrupt: program the IMAP register with the target
 * cpu's TID and set the VALID bit.
 */
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		/* Replace all possible TID encodings, keep other bits.  */
		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
	}
}

331 332 333 334 335
/* Retarget a sun4u interrupt.  Re-enabling reprograms the IMAP TID
 * from the (already updated) affinity via irq_choose_cpu(), so a plain
 * enable is all that is needed here.
 */
static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}

336
/* Disable a sun4u interrupt by clearing the IMAP VALID bit.  */
static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

349
/* End-of-interrupt for sun4u: write IDLE to the ICLR register so the
 * hardware can deliver the next interrupt for this source.  Skipped
 * while the irq is disabled or still being processed.
 */
static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

361
/* Enable a sun4v (hypervisor) interrupt: set its target cpu, clear its
 * state to IDLE, then enable delivery.  Failures are logged only.
 */
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

381 382
/* Retarget a sun4v interrupt via the hypervisor.  The cpu is chosen
 * from the descriptor's affinity by irq_choose_cpu().
 */
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}

393
/* Disable a sun4v interrupt via the hypervisor.  */
static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
L
Linus Torvalds 已提交
403

404
/* End-of-interrupt for sun4v: return the INO to IDLE state so the
 * hypervisor can deliver the next interrupt.  Skipped while the irq
 * is disabled or still in progress.
 */
static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

419 420
/* Enable a sun4v cookie-based virtual interrupt: set its target cpu,
 * reset its state to IDLE, then mark it valid.  Failures are logged.
 *
 * Fix: the last error message wrongly named sun4v_vintr_set_state for
 * a failed sun4v_vintr_set_valid() call.
 */
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

448 449
/* Retarget a sun4v cookie-based virtual interrupt via the hypervisor.  */
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}

465 466
static void sun4v_virq_disable(unsigned int virt_irq)
{
467 468 469
	unsigned long dev_handle, dev_ino;
	int err;

470 471
	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;
472 473 474 475 476 477 478

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
479 480
}

481
/* End-of-interrupt for a sun4v cookie-based virtual interrupt: return
 * it to IDLE state.  Skipped while disabled or in progress.
 */
static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

501
/* irq_chip for sun4u interrupts (direct IMAP/ICLR register access).  */
static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};
508

509
/* irq_chip for sun4v INO-based interrupts (hypervisor intr calls).  */
static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};
L
Linus Torvalds 已提交
516

517 518 519 520
/* irq_chip for sun4v cookie-based virtual interrupts (vintr calls).  */
static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

525 526 527 528 529 530 531 532 533 534 535
/* Flow handler that first invokes the driver-installed pre_handler
 * (see irq_install_pre_handler), then runs the normal fasteoi flow.
 */
static void fastcall pre_flow_handler(unsigned int virt_irq,
				      struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

536 537 538 539
/* Install @func to run before the normal flow handler for @virt_irq,
 * with the given opaque arguments; switches the descriptor over to
 * pre_flow_handler.  Assumes chip data was already set up by
 * build_irq()/sun4v_build_*().
 */
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}
L
Linus Torvalds 已提交
549

550 551 552 553
/* Create (or find) the virtual IRQ for a sun4u interrupt source
 * described by its ICLR/IMAP register addresses.  The INO is read out
 * of the IMAP register and adjusted by @inofixup.  Idempotent: a
 * bucket that already has a virt irq and chip data is returned as-is.
 */
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);	/* sun4v uses sun4v_build_*() */

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		/* First time we see this INO: allocate a virt irq and
		 * hook up the sun4u chip with the fasteoi flow.
		 */
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;	/* already fully initialized */

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}
L
Linus Torvalds 已提交
588

589 590
/* Shared setup for sun4v sysino-based interrupts: allocate/lookup the
 * virt irq for @sysino's bucket and attach @chip with the fasteoi flow.
 * IMAP/ICLR are poisoned since sun4v never touches them directly.
 */
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;	/* already fully initialized */

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}
L
Linus Torvalds 已提交
629

630 631 632 633 634 635 636 637 638
/* Build a sun4v INO-based interrupt: translate (devhandle, devino) to
 * a sysino and set it up with the sun4v chip.
 */
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

/* Build a sun4v cookie-based virtual interrupt.  A dedicated bucket is
 * allocated and its (complemented) physical address registered with
 * the hypervisor as the delivery cookie.  Returns 0 on allocation
 * failure.
 */
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	struct ino_bucket *bucket;
	unsigned long hv_err, cookie;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	/* The bucket is accessed via physical bypass loads/stores, so
	 * push the cacheable kzalloc stores out first.
	 */
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;	/* NOTE(review): leaks bucket and virt_irq on this path — confirm */

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	/* Cookie handed back by the hypervisor at delivery time;
	 * presumably decoded by the vectored trap code in sun4v_ivec.S
	 * (see the ino_bucket comment at the top of this file).
	 */
	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

682 683
void ack_bad_irq(unsigned int virt_irq)
{
684
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
685

686 687
	if (!ino)
		ino = 0xdeadbeef;
688

689 690
	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
L
Linus Torvalds 已提交
691 692 693 694
}

/* Softint entry point: drain this cpu's list of pending interrupt
 * buckets (queued by the vector trap handler in entry.S) and dispatch
 * each one through its descriptor's flow handler.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	/* Walk the snapshotted chain; each bucket is unlinked before
	 * its handler runs so it can be re-queued immediately.
	 */
	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	irq_exit();
	set_irq_regs(old_regs);
}

734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753
#ifdef CONFIG_HOTPLUG_CPU
/* On cpu offline: re-run set_affinity for every active, non-per-cpu
 * irq so interrupts get retargeted away from the departing cpu.
 */
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}
}
#endif

754 755 756 757 758 759
/* Register layout of the PROM "counter-timer" node.  */
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};
L
Linus Torvalds 已提交
760

761
/* Mapped PROM counter-timer registers; NULL if the node is absent.  */
static struct sun5_timer *prom_timers;
/* Limit values saved by kill_prom_timer() before silencing the timer.  */
static u64 prom_limit0, prom_limit1;

/* Locate the PROM "counter-timer" device node and record its already
 * PROM-mapped register address in prom_timers (NULL when not found).
 */
static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Silence the PROM's timer (saving its limit registers) and consume
 * any interrupt packet it already sent.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

/* Reset the calling cpu's pending-IVEC work list to empty.  */
void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

830 831 832 833 834 835 836 837 838 839 840 841
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and it's kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	/* qmask is (queue size in bytes - 1); entries are 64 bytes.  */
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

854
/* Register all four per-cpu mondo/error queues with the hypervisor for
 * @this_cpu.  See the OBP-call warning above register_one_mondo().
 */
void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

868
/* Boot-time allocation of one mondo queue of (qmask + 1) bytes,
 * naturally aligned to its size; stores the PA through @pa_ptr.
 * Fatal on failure.
 */
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

880
/* Boot-time allocation of a kernel buffer mirroring an error queue's
 * size/alignment; stores the PA through @pa_ptr.  Fatal on failure.
 */
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

893
/* Allocate the page used when sending cpu mondos: the mondo block
 * lives in the first 64 bytes, the cpu list right after it.
 */
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	/* cpu list must fit in the remainder of the page.  */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

911 912
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);

		init_cpu_send_mondo_info(tb);
	}

	/* Load up the boot cpu's entries.  */
	sun4v_register_mondo_queues(hard_smp_processor_id());
}

934 935 936 937
/* Placeholder action so the timer shows up in /proc/interrupts
 * (installed on irq 0 by init_IRQ()).
 */
static struct irqaction timer_irq_action = {
	.name = "timer",
};

L
Linus Torvalds 已提交
938 939 940
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	/* Buckets are read with physical bypass accesses, so flush the
	 * cacheable stores from the allocator first.
	 */
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}