/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
48 49

/* One ino_bucket per hardware interrupt vector number. */
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
L
Linus Torvalds 已提交
53

54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
/* Read ino_bucket::__irq_chain_pa through a bypass (physical address)
 * load; bucket_pa is the bucket's physical address.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	/* ldxa with ASI_PHYS_USE_EC: 64-bit load straight from physical
	 * memory, bypassing the MMU.
	 */
	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Zero ino_bucket::__irq_chain_pa (unlink the bucket from the pending
 * chain) with a bypass physical store.
 */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

/* Read ino_bucket::__virt_irq (32-bit) with a bypass physical load. */
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Store ino_bucket::__virt_irq (32-bit) with a bypass physical store. */
static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

108
/* Physical address of this cpu's pending-IVEC worklist head. */
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

/* Map from virtual IRQ number to the (dev_handle, dev_ino) pair that
 * owns it.  Entry 0 is reserved as the "invalid" marker.
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
116

117
unsigned char virt_irq_alloc(unsigned int dev_handle,
118
			     unsigned int dev_ino)
119
{
120
	unsigned long flags;
121 122 123 124
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

125 126
	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

127
	for (ent = 1; ent < NR_IRQS; ent++) {
128
		if (!virt_irq_table[ent].in_use)
129 130
			break;
	}
131 132
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
133 134
		ent = 0;
	} else {
135 136 137
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
138 139
	}

140
	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
141 142 143 144

	return ent;
}

145
#ifdef CONFIG_PCI_MSI
/* Return a virtual IRQ to the allocator.  Out-of-range numbers are
 * silently ignored.
 */
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif
160

L
Linus Torvalds 已提交
161
/*
162
 * /proc/interrupts printing:
L
Linus Torvalds 已提交
163 164 165 166
 */

int show_interrupts(struct seq_file *p, void *v)
{
167 168
	int i = *(loff_t *) v, j;
	struct irqaction * action;
L
Linus Torvalds 已提交
169 170
	unsigned long flags;

171 172 173 174 175 176 177 178 179 180 181 182 183
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
L
Linus Torvalds 已提交
184 185 186
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
187 188
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
L
Linus Torvalds 已提交
189
#endif
190
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
191 192 193
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
194
			seq_printf(p, ", %s", action->name);
195

L
Linus Torvalds 已提交
196
		seq_putc(p, '\n');
197 198
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
L
Linus Torvalds 已提交
199 200 201 202
	}
	return 0;
}

203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
/* Compute the interrupt target ID (TID) to program into an IMAP
 * register so the source is delivered to @cpuid, honoring the
 * encoding differences between Starfire, JBUS (Jalapeno/Serrano),
 * Safari, and plain UPA systems.
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			/* Distinguish JBUS from Safari by cpu version. */
			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari: agent ID and node ID fields. */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);	/* was ");;" -- stray extra semicolon removed */
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

238 239 240
/* Per-virt-irq chip data: physical IMAP/ICLR register addresses on
 * sun4u (sentinel ~0UL values on sun4v), plus an optional pre-handler
 * invoked ahead of the normal flow handler.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
L
Linus Torvalds 已提交
246

247 248
#ifdef CONFIG_SMP
/* Pick the cpu that should receive this interrupt.  With a fully open
 * affinity mask we round-robin across online cpus; otherwise we take
 * the first online cpu in the mask, falling back to round-robin when
 * the mask contains no online cpu.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Make sure the rover points at an online cpu. */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		/* Advance the rover to the next online cpu for next time. */
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
L
Linus Torvalds 已提交
292

293
static void sun4u_irq_enable(unsigned int virt_irq)
294
{
295
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
296

297
	if (likely(data)) {
298
		unsigned long cpuid, imap, val;
299
		unsigned int tid;
300

301 302
		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;
303

304
		tid = sun4u_compute_tid(imap, cpuid);
305

306 307 308 309 310
		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
311
		upa_writeq(ICLR_IDLE, data->iclr);
312 313 314
	}
}

315 316
/* Retargeting a sun4u interrupt is just re-running the enable
 * sequence, which re-reads the affinity mask via irq_choose_cpu().
 */
static void sun4u_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	sun4u_irq_enable(virt_irq);
}

321
static void sun4u_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
322
{
323
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
L
Linus Torvalds 已提交
324

325 326
	if (likely(data)) {
		unsigned long imap = data->imap;
327
		unsigned long tmp = upa_readq(imap);
L
Linus Torvalds 已提交
328

329
		tmp &= ~IMAP_VALID;
330
		upa_writeq(tmp, imap);
331 332 333
	}
}

334
static void sun4u_irq_eoi(unsigned int virt_irq)
335
{
336
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
337 338 339 340
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
341

342
	if (likely(data))
343
		upa_writeq(ICLR_IDLE, data->iclr);
344 345
}

346
static void sun4v_irq_enable(unsigned int virt_irq)
347
{
348
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
364 365
}

366 367
static void sun4v_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
368
{
369
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
370 371 372 373 374 375 376
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
377 378
}

379
static void sun4v_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
380
{
381
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
382
	int err;
L
Linus Torvalds 已提交
383

384 385 386 387
	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
388
}
L
Linus Torvalds 已提交
389

390
static void sun4v_irq_eoi(unsigned int virt_irq)
391
{
392
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
393
	struct irq_desc *desc = irq_desc + virt_irq;
394
	int err;
395 396 397

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
L
Linus Torvalds 已提交
398

399 400 401 402
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
L
Linus Torvalds 已提交
403 404
}

405 406
/* Enable a cookie-based (virtual) sun4v interrupt: target it, idle
 * its pending state, then mark it valid, all via hypervisor calls.
 */
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		/* Error message used to blame sun4v_vintr_set_state;
		 * report the function actually called.
		 */
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

434 435
static void sun4v_virt_set_affinity(unsigned int virt_irq,
				    const struct cpumask *mask)
436
{
437 438
	unsigned long cpuid, dev_handle, dev_ino;
	int err;
439

440
	cpuid = irq_choose_cpu(virt_irq);
441

442 443
	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;
444

445 446 447 448 449
	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
450 451
}

452 453
static void sun4v_virq_disable(unsigned int virt_irq)
{
454 455 456
	unsigned long dev_handle, dev_ino;
	int err;

457 458
	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;
459 460 461 462 463 464 465

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
466 467
}

468
static void sun4v_virq_eoi(unsigned int virt_irq)
469
{
470
	struct irq_desc *desc = irq_desc + virt_irq;
471 472
	unsigned long dev_handle, dev_ino;
	int err;
473 474 475

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
476

477 478
	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;
479

480 481 482 483 484 485
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
486 487
}

488
static struct irq_chip sun4u_irq = {
489 490 491
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
492
	.eoi		= sun4u_irq_eoi,
493
	.set_affinity	= sun4u_set_affinity,
494
};
495

496
static struct irq_chip sun4v_irq = {
497 498 499
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
500
	.eoi		= sun4v_irq_eoi,
501
	.set_affinity	= sun4v_set_affinity,
502
};
L
Linus Torvalds 已提交
503

504 505 506 507
static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
508
	.eoi		= sun4v_virq_eoi,
509
	.set_affinity	= sun4v_virt_set_affinity,
510 511
};

512
static void pre_flow_handler(unsigned int virt_irq,
513 514 515 516 517 518 519 520 521 522
				      struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

523 524 525 526
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
527
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
528
	struct irq_desc *desc = irq_desc + virt_irq;
529

530
	data->pre_handler = func;
531 532
	data->arg1 = arg1;
	data->arg2 = arg2;
533

534
	desc->handle_irq = pre_flow_handler;
535
}
L
Linus Torvalds 已提交
536

537 538 539 540
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
541
	unsigned int virt_irq;
542
	int ino;
L
Linus Torvalds 已提交
543

544
	BUG_ON(tlb_type == hypervisor);
545

546
	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
547
	bucket = &ivector_table[ino];
548 549
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
550
		virt_irq = virt_irq_alloc(0, ino);
551
		bucket_set_virt_irq(__pa(bucket), virt_irq);
552 553 554 555
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
556
	}
L
Linus Torvalds 已提交
557

558
	data = get_irq_chip_data(virt_irq);
559
	if (unlikely(data))
560
		goto out;
561

562 563 564 565
	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
L
Linus Torvalds 已提交
566
	}
567
	set_irq_chip_data(virt_irq, data);
L
Linus Torvalds 已提交
568

569 570
	data->imap  = imap;
	data->iclr  = iclr;
L
Linus Torvalds 已提交
571

572
out:
573
	return virt_irq;
574
}
L
Linus Torvalds 已提交
575

576 577
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
L
Linus Torvalds 已提交
578
{
579
	struct ino_bucket *bucket;
580
	struct irq_handler_data *data;
581
	unsigned int virt_irq;
582

583
	BUG_ON(tlb_type != hypervisor);
L
Linus Torvalds 已提交
584

585
	bucket = &ivector_table[sysino];
586 587
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
588
		virt_irq = virt_irq_alloc(0, sysino);
589
		bucket_set_virt_irq(__pa(bucket), virt_irq);
590 591 592
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
L
Linus Torvalds 已提交
593 594
	}

595
	data = get_irq_chip_data(virt_irq);
596
	if (unlikely(data))
L
Linus Torvalds 已提交
597 598
		goto out;

599 600 601 602 603
	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
604
	set_irq_chip_data(virt_irq, data);
L
Linus Torvalds 已提交
605

606 607 608 609 610 611
	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;
L
Linus Torvalds 已提交
612

613
out:
614
	return virt_irq;
615
}
L
Linus Torvalds 已提交
616

617 618 619 620 621 622 623 624 625
/* Build the virtual IRQ for a sun4v (devhandle, devino) pair by first
 * translating it to a system interrupt number via hypervisor call.
 */
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

/* Build a cookie-based (virtual) sun4v IRQ for (devhandle, devino).
 * A private ino_bucket is allocated and its physical address,
 * complemented, is registered with the hypervisor as the cookie.
 * Returns 0 on allocation failure.
 */
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The bucket is referenced by physical address from interrupt
	 * context, so push it out to RAM.
	 */
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Don't leak the bucket on this error path (the old
		 * code simply returned 0 here).
		 */
		kfree(bucket);
		return 0;
	}

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

677 678
void ack_bad_irq(unsigned int virt_irq)
{
679
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
680

681 682
	if (!ino)
		ino = 0xdeadbeef;
683

684 685
	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
L
Linus Torvalds 已提交
686 687
}

D
David S. Miller 已提交
688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708
/* Per-cpu alternate stacks used to run hard and soft interrupt
 * handlers off the current task's kernel stack.
 */
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

/* Switch %sp onto this cpu's hardirq stack unless we are already
 * running on it; returns the original %sp for restore_hardirq_stack().
 */
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Room for the register save area, plus the stack bias. */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
/* Undo set_hardirq_stack() by restoring the saved stack pointer. */
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

L
Linus Torvalds 已提交
709 710
void handler_irq(int irq, struct pt_regs *regs)
{
711
	unsigned long pstate, bucket_pa;
A
Al Viro 已提交
712
	struct pt_regs *old_regs;
D
David S. Miller 已提交
713
	void *orig_sp;
L
Linus Torvalds 已提交
714 715 716

	clear_softint(1 << irq);

A
Al Viro 已提交
717
	old_regs = set_irq_regs(regs);
L
Linus Torvalds 已提交
718 719
	irq_enter();

720 721 722 723 724 725
	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
726 727
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
728 729 730
			       "i" (PSTATE_IE)
			     : "memory");

D
David S. Miller 已提交
731 732
	orig_sp = set_hardirq_stack();

733
	while (bucket_pa) {
734
		struct irq_desc *desc;
735 736
		unsigned long next_pa;
		unsigned int virt_irq;
L
Linus Torvalds 已提交
737

738 739 740
		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);
741

742 743 744
		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);
745 746

		bucket_pa = next_pa;
L
Linus Torvalds 已提交
747
	}
748

D
David S. Miller 已提交
749 750
	restore_hardirq_stack(orig_sp);

L
Linus Torvalds 已提交
751
	irq_exit();
A
Al Viro 已提交
752
	set_irq_regs(old_regs);
L
Linus Torvalds 已提交
753 754
}

D
David S. Miller 已提交
755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780
/* Run pending softirqs on this cpu's dedicated softirq stack. */
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		/* Room for the register save area, plus the stack bias. */
		sp += THREAD_SIZE - 192 - STACK_BIAS;

		/* Switch to the softirq stack, run, then switch back. */
		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

781 782 783 784 785 786 787 788 789 790 791 792 793
#ifdef CONFIG_HOTPLUG_CPU
/* Re-target every active, non-per-cpu interrupt according to its
 * affinity mask, then shut off the local tick; used when taking this
 * cpu offline.
 */
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					&irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

803 804 805 806 807 808
/* Register layout of the PROM's sun5 counter-timer. */
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};
L
Linus Torvalds 已提交
809

810
static struct sun5_timer *prom_timers;
L
Linus Torvalds 已提交
811 812 813 814
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
815
	struct device_node *dp;
816
	const unsigned int *addr;
L
Linus Torvalds 已提交
817 818

	/* PROM timer node hangs out in the top level of device siblings... */
819 820 821 822 823 824 825
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}
L
Linus Torvalds 已提交
826 827 828 829

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
830
	if (!dp) {
L
Linus Torvalds 已提交
831 832 833 834 835
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
836 837
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
L
Linus Torvalds 已提交
838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Silence the PROM's counter-timer and drain any interrupt packet it
 * left pending; the old limits are saved for possible later restore.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

872
/* Reset this cpu's pending-IVEC worklist head to empty. */
void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

879 880 881 882 883 884 885 886 887 888 889 890
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and it's kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	/* Queue entries are 64 bytes each. */
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

903
/* Register all four of this cpu's mondo/error queues with the
 * hypervisor.  See the warning above register_one_mondo().
 */
void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

917
/* Allocate one naturally-aligned mondo queue of (qmask + 1) bytes,
 * rounded up to a page, and store its physical address in *pa_ptr.
 */
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

929
/* Allocate a naturally-aligned kernel buffer sized to match an error
 * queue, storing its physical address in *pa_ptr.
 */
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

942
/* Allocate the cpu-mondo dispatch block and the cpu-list page used
 * when sending mondos to other cpus (SMP only).
 */
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	/* The cpu list must fit in the page after the 64-byte block. */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

960 961
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}

/* Set up the cross-cpu mondo send state for every possible cpu. */
static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_cpu_send_mondo_info(&trap_block[cpu]);
}

989 990 991 992
/* Placeholder action so the timer shows up in /proc/interrupts. */
static struct irqaction timer_irq_action = {
	.name = "timer",
};

L
Linus Torvalds 已提交
993 994 995
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	/* The table is accessed by physical address from trap code;
	 * make sure it is visible in RAM.
	 */
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}