irq_64.c 24.7 KB
Newer Older
1
/* irq.c: UltraSparc IRQ handling/init/registry.
L
Linus Torvalds 已提交
2
 *
3
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
L
Linus Torvalds 已提交
4 5 6 7 8 9
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
10
#include <linux/linkage.h>
L
Linus Torvalds 已提交
11 12 13 14 15 16 17 18 19 20 21 22
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
23
#include <linux/bootmem.h>
24
#include <linux/irq.h>
L
Linus Torvalds 已提交
25 26 27 28 29 30

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
31
#include <asm/io.h>
L
Linus Torvalds 已提交
32 33 34
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
35
#include <asm/prom.h>
L
Linus Torvalds 已提交
36 37 38 39 40 41
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
42
#include <asm/auxio.h>
43
#include <asm/head.h>
44
#include <asm/hypervisor.h>
45
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
46

47
#include "entry.h"
48 49

#define NUM_IVECS	(IMAP_INR + 1)
50

51
struct ino_bucket *ivector_table;
52
unsigned long ivector_table_pa;
L
Linus Torvalds 已提交
53

54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */

/* Read the __irq_chain_pa link of the ino_bucket at physical address
 * 'bucket_pa' using a physical (ASI_PHYS_USE_EC) load.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Zero the bucket's __irq_chain_pa link, detaching it from the
 * pending-IVEC chain.
 */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

/* Read the bucket's 32-bit __virt_irq field via a physical load. */
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Store 'virt_irq' into the bucket's __virt_irq field via a
 * physical store.
 */
static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

108
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
L
Linus Torvalds 已提交
109

110 111 112
/* Map from virtual IRQ number to the (dev_handle, dev_ino) pair
 * identifying the interrupt source.  Entry 0 is never handed out
 * (virt_irq_alloc() starts at 1), so virt_irq == 0 means "none".
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;	/* non-zero when this slot is allocated */
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);	/* protects virt_irq_table */
116

117
/* Allocate a virtual IRQ slot for (dev_handle, dev_ino).
 *
 * Returns the new virt_irq number, or 0 when the table is full.
 * The return type is only 8 bits wide, hence the BUILD_BUG_ON
 * guaranteeing every valid index fits.
 */
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	/* Slot 0 is reserved as the "no virt_irq" sentinel. */
	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

145
#ifdef CONFIG_PCI_MSI
146
void virt_irq_free(unsigned int virt_irq)
147
{
148
	unsigned long flags;
149

150 151 152
	if (virt_irq >= NR_IRQS)
		return;

153 154
	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

155
	virt_irq_table[virt_irq].in_use = 0;
156

157
	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
158
}
159
#endif
160

L
Linus Torvalds 已提交
161
/*
 * /proc/interrupts printing:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Header row of CPU column labels, emitted before entry 0. */
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* nothing registered on this line */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		/* Remaining shared handlers on the same line. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		/* Final pseudo-entry: per-cpu NMI counts. */
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
/* Compute the target ID (TID) bits to program into an IMAP register
 * so the interrupt is delivered to 'cpuid'.  The encoding depends on
 * the platform: Starfire uses a translated UPA ID, Jalapeno/Serrano
 * use the JBUS field, other Cheetah/Cheetah+ chips use Safari
 * agent/node IDs, and everything else uses a plain UPA TID.
 *
 * Fix: removed a stray double semicolon after the Safari mask.
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			/* Distinguish JBUS parts by the %ver manufacturer ID. */
			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari: low 5 bits are the agent ID,
				 * next 5 bits are the node ID.
				 */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

243 244 245
/* Per-IRQ chip data.  On sun4u, iclr/imap hold the physical
 * interrupt-clear and interrupt-map register addresses; on sun4v
 * they are set to ~0UL to catch accidental use (register access is
 * replaced by hypervisor calls there).  pre_handler, when installed
 * via irq_install_pre_handler(), runs before the normal flow handler.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
L
Linus Torvalds 已提交
251

252 253
#ifdef CONFIG_SMP
/* Pick the cpu that should receive 'virt_irq', honoring the
 * affinity mask in irq_desc.  An unrestricted mask (CPU_MASK_ALL)
 * gets round-robin distribution via the static 'irq_rover'; a
 * restricted mask takes its first online cpu, falling back to
 * round-robin if none of the masked cpus are online.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Make sure the rover points at an online cpu... */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		/* ...then advance it to the next online cpu for next time. */
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;	/* mask excludes all online cpus */

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP: the only choice is the boot processor. */
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
L
Linus Torvalds 已提交
297

298
/* Enable a sun4u interrupt: program the IMAP register with the
 * target cpu's TID plus the VALID bit, then idle the ICLR register
 * so a pending state does not block delivery.
 */
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		/* Clear all possible TID encodings before inserting
		 * the new one; only one applies per platform.
		 */
		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

320 321
/* Retarget a sun4u interrupt by simply re-enabling it, which
 * rewrites the IMAP TID from irq_choose_cpu().  NOTE(review): 'mask'
 * is unused here; presumably the genirq core has already stored it
 * in irq_desc[].affinity, which irq_choose_cpu() reads — confirm.
 */
static void sun4u_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	sun4u_irq_enable(virt_irq);
}

326
static void sun4u_irq_disable(unsigned int virt_irq)
L
Linus Torvalds 已提交
327
{
328
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
L
Linus Torvalds 已提交
329

330 331
	if (likely(data)) {
		unsigned long imap = data->imap;
332
		unsigned long tmp = upa_readq(imap);
L
Linus Torvalds 已提交
333

334
		tmp &= ~IMAP_VALID;
335
		upa_writeq(tmp, imap);
336 337 338
	}
}

339
/* End-of-interrupt for sun4u: write ICLR_IDLE so the source can
 * post again.  Skipped while the IRQ is disabled or still being
 * processed — the fasteoi flow will re-issue the eoi later.
 */
static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

351
/* Enable a sun4v (hypervisor) interrupt: set its target cpu, idle
 * its state, then enable it, logging any hypervisor error.
 */
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

371 372
/* Retarget a sun4v interrupt via the hypervisor.  NOTE(review):
 * 'mask' is unused; irq_choose_cpu() reads irq_desc[].affinity,
 * presumably updated by the genirq core before this hook — confirm.
 */
static void sun4v_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}

384
/* Disable a sun4v interrupt through the hypervisor. */
static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
L
Linus Torvalds 已提交
394

395
/* End-of-interrupt for sun4v: return the INO to the idle state so
 * it can fire again.  Skipped while disabled or in progress, same
 * as sun4u_irq_eoi().
 */
static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

410 411
/* Enable a sun4v virtual (cookie-based) interrupt: set target cpu,
 * idle the state, then mark it valid, logging hypervisor errors.
 *
 * Fix: the last error message named sun4v_vintr_set_state although
 * the failing call is sun4v_vintr_set_valid.
 */
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

439 440
/* Retarget a sun4v virtual interrupt via the hypervisor.
 * NOTE(review): 'mask' is unused; irq_choose_cpu() reads
 * irq_desc[].affinity instead — confirm the core updates it first.
 */
static void sun4v_virt_set_affinity(unsigned int virt_irq,
				    const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}

457 458
static void sun4v_virq_disable(unsigned int virt_irq)
{
459 460 461
	unsigned long dev_handle, dev_ino;
	int err;

462 463
	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;
464 465 466 467 468 469 470

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
471 472
}

473
/* End-of-interrupt for a sun4v virtual interrupt: return it to the
 * idle state.  Skipped while disabled or in progress.
 */
static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

493
/* irq_chip for direct-register (sun4u) interrupts. */
static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};
500

501
/* irq_chip for hypervisor-managed (sun4v) INO interrupts. */
static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};
L
Linus Torvalds 已提交
508

509 510 511 512
/* irq_chip for sun4v virtual (cookie-based, dev_handle/dev_ino)
 * interrupts built by sun4v_build_virq().
 */
static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

517
/* Flow handler installed by irq_install_pre_handler(): run the
 * registered pre_handler with the device INO, then continue with
 * the normal fasteoi flow.
 */
static void pre_flow_handler(unsigned int virt_irq,
				      struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

528 529 530 531
/* Attach a pre-handler (func, arg1, arg2) to 'virt_irq' and switch
 * its flow handler to pre_flow_handler.  Assumes chip data was
 * already allocated by build_irq()/sun4v_build_*().
 */
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}
L
Linus Torvalds 已提交
541

542 543 544 545
/* Build (or look up) the virtual IRQ for a sun4u interrupt source
 * described by its ICLR/IMAP register addresses.  The INO is read
 * from the IMAP register and adjusted by 'inofixup'.  Idempotent:
 * an already-initialized bucket returns its existing virt_irq.
 */
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);	/* sun4v uses sun4v_build_irq() */

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;	/* already fully set up */

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}
L
Linus Torvalds 已提交
580

581 582
/* Shared setup for sun4v sysino interrupts: allocate (or reuse) a
 * virt_irq for the ivector_table bucket of 'sysino' and attach an
 * irq_handler_data marked with poison register addresses.
 */
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;	/* already fully set up */

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}
L
Linus Torvalds 已提交
621

622 623 624 625 626 627 628 629 630
/* Build the virt_irq for a sun4v device interrupt by translating
 * (devhandle, devino) to a system interrupt number first.
 */
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

/* Build a sun4v cookie-based virtual interrupt for (devhandle,
 * devino).  A fresh ino_bucket is allocated and its physical
 * address (negated) is registered with the hypervisor as the
 * interrupt cookie.  Returns the new virt_irq, or 0 on allocation
 * failure.
 */
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	/* The bucket is accessed via physical (bypass) loads from
	 * interrupt context, so push it out of the D-cache now.
	 */
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;	/* NOTE(review): leaks 'bucket' and the virt_irq slot */

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

682 683
void ack_bad_irq(unsigned int virt_irq)
{
684
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
685

686 687
	if (!ino)
		ino = 0xdeadbeef;
688

689 690
	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
L
Linus Torvalds 已提交
691 692
}

D
David S. Miller 已提交
693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
/* Per-cpu stacks used while running hard- and soft-interrupt
 * handlers; switched to/from by the helpers below.
 */
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

/* Switch %sp onto this cpu's hardirq stack unless we are already
 * running within it; returns the original %sp for
 * restore_hardirq_stack().  Must inline — a call frame on the old
 * stack across the switch would be unusable.
 */
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Point at the top of the stack, leaving room for a
		 * register save area and applying the sparc64 stack bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
/* Undo set_hardirq_stack(): restore the saved stack pointer. */
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

L
Linus Torvalds 已提交
714 715
/* Top-level device interrupt dispatch: drain the per-cpu chain of
 * pending ino_buckets (linked by physical address) and invoke each
 * one's flow handler, running on the dedicated hardirq stack.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	/* Walk the snapshot, unlinking each bucket before handling
	 * it so it can be queued again immediately.
	 */
	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

D
David S. Miller 已提交
760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785
/* Run pending softirqs on the per-cpu softirq stack, switching %sp
 * around __do_softirq() just like set_hardirq_stack() does for
 * hardirqs.  No-op when called from interrupt context.
 */
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		/* Top of stack, minus register save area, biased. */
		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

786 787 788 789 790 791 792 793 794 795 796 797 798
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
799
					&irq_desc[irq].affinity);
800 801 802
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}
803 804

	tick_ops->disable_irq();
805 806 807
}
#endif

808 809 810 811 812 813
/* Register layout of the PROM's "counter-timer" node. */
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};
L
Linus Torvalds 已提交
814

815
static struct sun5_timer *prom_timers;
/* Saved limit registers, preserved by kill_prom_timer(). */
static u64 prom_limit0, prom_limit1;

/* Locate the PROM's counter-timer node and record its (PROM-mapped)
 * register address in prom_timers; leave prom_timers NULL when the
 * node or its "address" property is absent.
 */
static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

/* Stop the PROM's timer (saving its limit registers first) and
 * consume any interrupt packet it left pending.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

877
/* Clear this cpu's pending-IVEC work list.  notrace: may run before
 * per-cpu state needed by the tracer is set up.
 */
void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

884 885 886 887 888 889 890 891 892 893 894 895
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and it's kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */

/* Register one mondo/error queue (base physical address 'paddr',
 * queue type 'type', size mask 'qmask') with the hypervisor; halts
 * on failure.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;	/* 64 bytes per entry */
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

908
/* Register all four mondo/error queues of 'this_cpu' with the
 * hypervisor.  See the warning above register_one_mondo(): no OBP
 * calls allowed here.
 */
void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

922
/* Boot-time allocation of one mondo queue; stores the queue's
 * physical address in *pa_ptr.  Size (and alignment) come from the
 * hypervisor-reported qmask.  Halts on failure.
 */
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

934
/* Boot-time allocation of one kernel error-report buffer; same
 * sizing rules as alloc_one_mondo().  Halts on failure.
 */
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

947
/* Allocate the page used for sending cpu mondos: mondo data block
 * at offset 0, cpu list at offset 64.  SMP only.
 */
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	/* The cpu list must fit in the page after the 64-byte data block. */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

965 966
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}

/* Set up the cpu-mondo send buffers for every possible cpu. */
static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_cpu_send_mondo_info(&trap_block[cpu]);
}

994 995 996 997
/* Placeholder action hung on irq_desc[0] by init_IRQ() so the timer
 * shows up by name (e.g. in show_interrupts()).
 */
static struct irqaction timer_irq_action = {
	.name = "timer",
};

L
Linus Torvalds 已提交
998 999 1000
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	/* ivector_table is read via physical addresses from trap
	 * handlers, hence the dcache flush and the saved __pa().
	 */
	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}