/* irq_64.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

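/* Table mapping Linux virtual IRQ numbers to the (dev_handle, dev_ino)
 * pair that identifies the underlying interrupt source.  Entry 0 is
 * reserved to mean "no IRQ allocated"; irq_alloc() below starts
 * scanning at entry 1 and returns 0 on exhaustion.
 */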
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);

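/* Typical use, sketched from sun4v_build_virq() further down: the
 * cookie handed back here is what the rest of the kernel sees as the
 * IRQ number:
 *
 *	irq = irq_alloc(devhandle, devino);
 *	if (irq)
 *		set_irq_chip_and_handler_name(irq, chip,
 *					      handle_fasteoi_irq, "IVEC");
 */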
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

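/* Compute the interrupt Target ID (TID) field to program into an IMAP
 * register.  The encoding depends on the interconnect: Starfire needs a
 * firmware translation, JBUS parts (Jalapeno/Serrano) take the cpuid
 * directly, Safari parts (Cheetah/Cheetah+) split the cpuid into agent
 * and node IDs, and plain UPA takes the cpuid shifted into the TID field.
 */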
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

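/* Per-IRQ chip data: the IMAP/ICLR register addresses of the source
 * (faked as ~0UL on sun4v, where the hypervisor owns them), plus an
 * optional pre-handler run before the normal fasteoi flow.
 */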
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;
	struct irq_desc *desc = irq_desc + data->irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	struct irq_desc *desc = irq_desc + data->irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				    const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	struct irq_desc *desc = irq_desc + data->irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
};

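/* Flow handler used once a pre-handler has been installed: run the
 * device-specific pre-handler first, then fall through to the generic
 * fasteoi flow.
 */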
static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_handler_data *handler_data = get_irq_data(irq);
	unsigned int ino = irq_table[irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);

	handle_fasteoi_irq(irq, desc);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = get_irq_data(irq);
	struct irq_desc *desc = irq_desc + irq;

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

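/* Map a sun4u IMAP/ICLR register pair to a virtual IRQ.  The INO read
 * back from the IMAP register indexes ivector_table, and the bucket
 * caches the virtual IRQ so that repeated calls for the same source
 * hand back the same number.
 */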
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}

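/* Two kinds of sun4v sources: "sysino" interrupts index ivector_table
 * directly, while "virq" interrupts get a dynamically allocated
 * ino_bucket whose inverted physical address is registered with the
 * hypervisor as the delivery cookie.
 */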
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	set_irq_chip_and_handler_name(irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

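/* First-level device interrupt entry point.  The trap vector chained
 * pending ino_buckets onto this cpu's irq_worklist; snapshot and clear
 * that list atomically with interrupts disabled, then run the flow
 * handler for each pending bucket on the separate hard IRQ stack.
 */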
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

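/* Run softirqs on the dedicated per-cpu softirq stack.  The %sp value
 * is cooked by hand: offset by STACK_BIAS as sparc64 requires, leaving
 * 192 bytes for a minimal stack frame; it is swapped in around
 * __do_softirq() and restored afterwards.
 */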
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

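/* On cpu hot-unplug, re-run ->irq_set_affinity() for every active,
 * non-per-cpu IRQ with its existing affinity mask; irq_choose_cpu()
 * will then steer it to a cpu that is still online.  Finally shut off
 * the local timer interrupt.
 */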
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			struct irq_data *data = irq_get_irq_data(irq);

			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
				                             data->affinity,
				                             false);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and it's kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

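/* The cpu mondo block and the cpu list share a single page: 64 bytes
 * of mondo block up front, then an array of u16 cpu numbers, hence the
 * BUILD_BUG_ON below that the list must fit in PAGE_SIZE - 64.
 */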
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}