irq.c 18.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
23
#include <linux/bootmem.h>
24
#include <linux/irq.h>
L
Linus Torvalds 已提交
25 26 27 28 29 30

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
31
#include <asm/io.h>
L
Linus Torvalds 已提交
32 33 34 35
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
36
#include <asm/prom.h>
L
Linus Torvalds 已提交
37 38 39 40 41 42
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
43
#include <asm/auxio.h>
44
#include <asm/head.h>
L
Linus Torvalds 已提交
45 46 47 48 49 50 51 52 53 54 55 56

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
61 62 63 64 65 66
struct ino_bucket {
	/* Next handler in the per-CPU IRQ worklist.  Bucket pointers
	 * are known to have their high 32 bits clear, so only the low
	 * 32 bits are stored here to save space.
	 *
	 * NOTE: field offsets are hard-coded in entry.S/sun4v_ivec.S,
	 * do not reorder or resize these members.
	 */
/*0x00*/unsigned int irq_chain;

	/* Virtual interrupt number assigned to this INO.  */
/*0x04*/unsigned int virt_irq;
};

#define NUM_IVECS	(IMAP_INR + 1)
L
Linus Torvalds 已提交
73 74
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

75 76 77 78 79
#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))

L
Linus Torvalds 已提交
80 81 82 83 84 85 86
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
L
Linus Torvalds 已提交
88

89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
/* Map from virtual IRQ number to "real" IRQ cookie (a bucket address
 * truncated to 32 bits).  Slot 0 is reserved to mean "unallocated".
 */
static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;

/* Allocate the next free virtual IRQ and bind it to real_irq.
 * Allocation is a simple bump pointer; virtual IRQs are never freed.
 * Returns the new virtual IRQ, or 0 when the table is exhausted.
 */
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
	unsigned char ent;

	/* Virtual IRQ numbers must fit in an unsigned char. */
	BUILD_BUG_ON(NR_IRQS >= 256);

	ent = virt_irq_cur;
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		return 0;
	}

	virt_irq_cur = ent + 1;
	virt_to_real_irq_table[ent] = real_irq;

	return ent;
}

#if 0 /* Currently unused. */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
	struct ino_bucket *bucket = __bucket(real_irq);

	return bucket->virt_irq;
}
#endif

/* Translate a virtual IRQ back into its real IRQ cookie; returns 0
 * (the reserved slot) if virt_irq was never allocated.
 */
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq];
}

L
Linus Torvalds 已提交
124
/*
125
 * /proc/interrupts printing:
L
Linus Torvalds 已提交
126 127 128 129
 */

int show_interrupts(struct seq_file *p, void *v)
{
130 131
	int i = *(loff_t *) v, j;
	struct irqaction * action;
L
Linus Torvalds 已提交
132 133
	unsigned long flags;

134 135 136 137 138 139 140 141 142 143 144 145 146
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
L
Linus Torvalds 已提交
147 148 149
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
150 151
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
L
Linus Torvalds 已提交
152
#endif
153
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
154 155 156
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
157
			seq_printf(p, ", %s", action->name);
158

L
Linus Torvalds 已提交
159
		seq_putc(p, '\n');
160 161
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
L
Linus Torvalds 已提交
162 163 164 165
	}
	return 0;
}

166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202
extern unsigned long real_hard_smp_processor_id(void);

/* Compute the interrupt target ID (TID) to program into an IMAP
 * register so interrupts from that source are delivered to cpuid.
 * The TID encoding depends upon the interconnect: Starfire UPA
 * (needs a PROM translation of the UPA mid), JBUS (Jalapeno/Serrano),
 * Safari (agent/node split), or plain UPA.
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			/* The %ver register distinguishes JBUS parts
			 * (Jalapeno/Serrano) from Safari parts.
			 */
			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari: low 5 bits are the agent ID,
				 * the next 5 bits are the node ID.
				 */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

203 204 205
/* Per-IRQ chip data: physical addresses of the interrupt clear (ICLR)
 * and map (IMAP) registers, plus an optional pre-handler which the
 * "+ack" chip variants invoke from their ->ack() hook.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*pre_handler_arg1;
	void		*pre_handler_arg2;
};
L
Linus Torvalds 已提交
211

212
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
L
Linus Torvalds 已提交
213
{
214
	unsigned int real_irq = virt_to_real_irq(virt_irq);
215
	struct ino_bucket *bucket = NULL;
L
Linus Torvalds 已提交
216

217 218
	if (likely(real_irq))
		bucket = __bucket(real_irq);
219

220
	return bucket;
L
Linus Torvalds 已提交
221 222
}

223 224
#ifdef CONFIG_SMP
/* Pick the cpu that should receive virt_irq.  With the default "all
 * cpus" affinity mask we round-robin over the online cpus using a
 * lock-protected rover; otherwise we take the first online cpu in the
 * affinity mask, falling back to round-robin if none is online.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Skip over any cpus that have gone offline. */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		/* Advance the rover to the next online cpu. */
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP: everything is delivered to the one and only cpu. */
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
L
Linus Torvalds 已提交
268

269
static void sun4u_irq_enable(unsigned int virt_irq)
270
{
271 272
	irq_desc_t *desc = irq_desc + virt_irq;
	struct irq_handler_data *data = desc->handler_data;
273

274 275 276
	if (likely(data)) {
		unsigned long cpuid, imap;
		unsigned int tid;
277

278 279
		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;
280

281
		tid = sun4u_compute_tid(imap, cpuid);
282

283
		upa_writel(tid | IMAP_VALID, imap);
284 285 286
	}
}

287
static void sun4u_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
288
{
289 290
	irq_desc_t *desc = irq_desc + virt_irq;
	struct irq_handler_data *data = desc->handler_data;
L
Linus Torvalds 已提交
291

292 293 294
	if (likely(data)) {
		unsigned long imap = data->imap;
		u32 tmp = upa_readl(imap);
L
Linus Torvalds 已提交
295

296 297
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
298 299 300
	}
}

301
static void sun4u_irq_end(unsigned int virt_irq)
302
{
303 304
	irq_desc_t *desc = irq_desc + virt_irq;
	struct irq_handler_data *data = desc->handler_data;
305

306 307
	if (likely(data))
		upa_writel(ICLR_IDLE, data->iclr);
308 309
}

310
static void sun4v_irq_enable(unsigned int virt_irq)
311
{
312 313
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
314

315 316 317
	if (likely(bucket)) {
		unsigned long cpuid;
		int err;
318

319
		cpuid = irq_choose_cpu(virt_irq);
320

321 322 323 324 325 326 327 328
		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
329 330 331
	}
}

332
static void sun4v_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
333
{
334 335
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
L
Linus Torvalds 已提交
336

337 338
	if (likely(bucket)) {
		int err;
L
Linus Torvalds 已提交
339

340 341 342 343
		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): "
			       "err(%d)\n", ino, err);
L
Linus Torvalds 已提交
344
	}
345
}
L
Linus Torvalds 已提交
346

347 348 349 350
static void sun4v_irq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
L
Linus Torvalds 已提交
351

352 353
	if (likely(bucket)) {
		int err;
L
Linus Torvalds 已提交
354

355 356 357 358
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
L
Linus Torvalds 已提交
359 360 361
	}
}

362
static void run_pre_handler(unsigned int virt_irq)
L
Linus Torvalds 已提交
363
{
364 365 366
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	irq_desc_t *desc = irq_desc + virt_irq;
	struct irq_handler_data *data = desc->handler_data;
L
Linus Torvalds 已提交
367

368 369 370 371
	if (likely(data->pre_handler)) {
		data->pre_handler(__irq_ino(__irq(bucket)),
				  data->pre_handler_arg1,
				  data->pre_handler_arg2);
L
Linus Torvalds 已提交
372
	}
373 374
}

375 376 377 378 379 380
static struct hw_interrupt_type sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.end		= sun4u_irq_end,
};
381

382 383 384 385 386 387 388
static struct hw_interrupt_type sun4u_irq_ack = {
	.typename	= "sun4u+ack",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4u_irq_end,
};
389

390 391 392 393 394 395
static struct hw_interrupt_type sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.end		= sun4v_irq_end,
};
L
Linus Torvalds 已提交
396

397 398 399 400 401 402 403
static struct hw_interrupt_type sun4v_irq_ack = {
	.typename	= "sun4v+ack",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_irq_end,
};
L
Linus Torvalds 已提交
404

405 406 407 408 409 410
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	irq_desc_t *desc = irq_desc + virt_irq;
	struct irq_handler_data *data = desc->handler_data;
411

412 413 414
	data->pre_handler = func;
	data->pre_handler_arg1 = arg1;
	data->pre_handler_arg2 = arg2;
L
Linus Torvalds 已提交
415

416 417 418 419
	if (desc->chip == &sun4u_irq_ack ||
	    desc->chip == &sun4v_irq_ack)
		return;

420 421
	desc->chip = (desc->chip == &sun4u_irq ?
		      &sun4u_irq_ack : &sun4v_irq_ack);
422
}
L
Linus Torvalds 已提交
423

424 425 426 427 428 429
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	irq_desc_t *desc;
	int ino;
L
Linus Torvalds 已提交
430

431
	BUG_ON(tlb_type == hypervisor);
432

433 434 435 436
	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
437
		irq_desc[bucket->virt_irq].chip = &sun4u_irq;
438
	}
L
Linus Torvalds 已提交
439

440 441 442
	desc = irq_desc + bucket->virt_irq;
	if (unlikely(desc->handler_data))
		goto out;
443

444 445 446 447
	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
L
Linus Torvalds 已提交
448
	}
449
	desc->handler_data = data;
L
Linus Torvalds 已提交
450

451 452
	data->imap  = imap;
	data->iclr  = iclr;
L
Linus Torvalds 已提交
453

454 455 456
out:
	return bucket->virt_irq;
}
L
Linus Torvalds 已提交
457

458
/* Build (or look up) the virtual IRQ for a sun4v device interrupt,
 * identified by its device handle and device INO.  Returns the
 * virtual IRQ number.
 */
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned long sysino;
	irq_desc_t *desc;

	BUG_ON(tlb_type != hypervisor);

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	bucket = &ivector_table[sysino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
		irq_desc[bucket->virt_irq].chip = &sun4v_irq;
	}

	desc = irq_desc + bucket->virt_irq;
	if (unlikely(desc->handler_data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	desc->handler_data = data;

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return bucket->virt_irq;
}
L
Linus Torvalds 已提交
495

496 497 498 499 500
/* Software-resend of virt_irq: with interrupts disabled, push its
 * bucket onto this cpu's worklist and raise the device-IRQ softint so
 * handler_irq() will pick it up, then restore the interrupt state.
 */
void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id());
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	set_softint(1 << PIL_DEVICE_IRQ);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
511

512 513 514 515
void ack_bad_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = 0xdeadbeef;
516

517 518
	if (bucket)
		ino = bucket - &ivector_table[0];
519

520 521
	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
L
Linus Torvalds 已提交
522 523
}

524 525 526 527 528 529 530 531 532 533 534 535 536 537 538
#ifndef CONFIG_SMP
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);

void timer_irq(int irq, struct pt_regs *regs)
{
	unsigned long clr_mask = 1 << irq;
	unsigned long tick_mask = tick_ops->softint_mask;

	if (get_softint() & tick_mask) {
		irq = 0;
		clr_mask = tick_mask;
	}
	clear_softint(clr_mask);

	irq_enter();
539

540
	kstat_this_cpu.irqs[0]++;
541
	timer_interrupt(irq, NULL, regs);
542

543 544 545 546
	irq_exit();
}
#endif

L
Linus Torvalds 已提交
547 548
void handler_irq(int irq, struct pt_regs *regs)
{
549
	struct ino_bucket *bucket;
A
Al Viro 已提交
550
	struct pt_regs *old_regs;
L
Linus Torvalds 已提交
551 552 553

	clear_softint(1 << irq);

A
Al Viro 已提交
554
	old_regs = set_irq_regs(regs);
L
Linus Torvalds 已提交
555 556 557
	irq_enter();

	/* Sliiiick... */
558 559 560
	bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
	while (bucket) {
		struct ino_bucket *next = __bucket(bucket->irq_chain);
L
Linus Torvalds 已提交
561

562
		bucket->irq_chain = 0;
A
Al Viro 已提交
563
		__do_IRQ(bucket->virt_irq);
564

565
		bucket = next;
L
Linus Torvalds 已提交
566
	}
567

L
Linus Torvalds 已提交
568
	irq_exit();
A
Al Viro 已提交
569
	set_irq_regs(old_regs);
L
Linus Torvalds 已提交
570 571
}

572 573 574 575 576 577
/* Register layout of the sun5 PROM counter-timer. */
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};
L
Linus Torvalds 已提交
578

579
static struct sun5_timer *prom_timers;
L
Linus Torvalds 已提交
580 581 582 583
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
584 585
	struct device_node *dp;
	unsigned int *addr;
L
Linus Torvalds 已提交
586 587

	/* PROM timer node hangs out in the top level of device siblings... */
588 589 590 591 592 593 594
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}
L
Linus Torvalds 已提交
595 596 597 598

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
599
	if (!dp) {
L
Linus Torvalds 已提交
600 601 602 603 604
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
605 606
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
L
Linus Torvalds 已提交
607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Shut off the PROM's counter-timer (saving its limits first) and
 * drain any interrupt packet it already sent at IRQ 14.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

645
	trap_block[cpu].irq_worklist = 0;
L
Linus Torvalds 已提交
646 647
}

648
/* Register one 128-entry mondo/error queue of the given type with the
 * hypervisor; halt via the PROM if the hypercall fails.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	unsigned long num_entries = 128;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

661
/* Register all four per-cpu queues (cpu mondo, device mondo, resumable
 * and non-resumable error) with the hypervisor for this_cpu.
 */
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);
696 697 698 699 700 701 702 703 704

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

705
/* Allocate the page used when sending cpu mondos: the first 64 bytes
 * hold the mondo data block, the rest holds the target cpu list.
 */
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
	void *page;

	/* The u16 cpu list must fit after the 64-byte data block. */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

727
/* Allocate and register the mondo and error queues for this cpu.  */
728
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
729 730 731
{
	struct trap_per_cpu *tb = &trap_block[cpu];

732 733 734 735 736 737 738
	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
739

740 741
		init_cpu_send_mondo_info(tb, use_bootmem);
	}
742

743 744 745 746 747 748 749 750
	if (load) {
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}
		sun4v_register_mondo_queues(cpu);
	}
751 752
}

753 754 755 756
/* Placeholder action attached to IRQ 0 so the timer shows up in
 * /proc/interrupts.
 */
static struct irqaction timer_irq_action = {
	.name = "timer",
};

L
Linus Torvalds 已提交
757 758 759 760 761 762 763
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* On sun4v, set up this cpu's mondo/error queues first. */
	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}