xics.c 21.2 KB
Newer Older
1 2
/*
 * arch/powerpc/platforms/pseries/xics.c
L
Linus Torvalds 已提交
3 4 5 6 7 8 9 10
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
11

L
Linus Torvalds 已提交
12 13 14 15 16 17 18 19 20
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
21
#include <linux/msi.h>
M
Milton Miller 已提交
22
#include <linux/of.h>
23
#include <linux/percpu.h>
24

25
#include <asm/firmware.h>
L
Linus Torvalds 已提交
26 27 28 29 30 31 32
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>

33
#include "xics.h"
34
#include "plpar_wrappers.h"
35

36 37
/* The irq_host covering every XICS interrupt source (all but the 8259) */
static struct irq_host *xics_host;

/* Interrupt source numbers with fixed, special meanings */
#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY		4

/* The least favored priority */
#define LOWEST_PRIORITY		0xFF

/* The number of priorities defined above */
#define MAX_NUM_PRIORITIES	3

/* Interrupt servers; refined from the device tree at boot */
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;

/*
 * Per-cpu stack of saved CPPR (current processor priority) values:
 * pushed on interrupt entry (push_cppr) and popped at EOI (pop_cppr).
 */
struct xics_cppr {
	unsigned char stack[MAX_NUM_PRIORITIES];
	int index;
};

static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
72 73 74 75

/* Direct hardware low level accessors */

/* The part of the interrupt presentation layer that we care about */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};

/* Per-cpu mapping of the presentation registers (set up at init time) */
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

94
/* Read this cpu's XIRR register */
static inline unsigned int direct_xirr_info_get(void)
{
	int cpu = smp_processor_id();

	return in_be32(&xics_per_cpu[cpu]->xirr.word);
}

/* Write this cpu's XIRR register (used for EOI) */
static inline void direct_xirr_info_set(unsigned int value)
{
	int cpu = smp_processor_id();

	out_be32(&xics_per_cpu[cpu]->xirr.word, value);
}

/* Set this cpu's CPPR (top byte of XIRR) */
static inline void direct_cppr_info(u8 value)
{
	int cpu = smp_processor_id();

	out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
}

/* Write the QIRR of an arbitrary cpu (how IPIs are raised/cleared) */
static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}


121
/* LPAR low level accessors */

/*
 * Hypervisor-mediated equivalents of the direct_* accessors above.
 * Any hcall failure here means the interrupt controller state is
 * unknown, hence the panic()s.
 */

static inline unsigned int lpar_xirr_info_get(void)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic(" bad return code xirr - rc = %lx \n", lpar_rc);
	return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(unsigned int value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_eoi(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc,
		      value);
}

static inline void lpar_cppr_info(u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu , u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}


163
/* Interface to generic irq subsystem */

#ifdef CONFIG_SMP
/*
 * Pick the hardware interrupt server for virq, honouring its affinity
 * mask.  With strict_check set, return -1 when no online cpu is in the
 * mask; otherwise fall back to a default server.
 */
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask;
	cpumask_t tmp = CPU_MASK_NONE;

	cpumask_copy(&cpumask, irq_to_desc(virq)->affinity);
	if (!distribute_irqs)
		return default_server;

	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
		cpus_and(tmp, cpu_online_map, cpumask);

		server = first_cpu(tmp);

		/* first_cpu() returns >= NR_CPUS when the mask is empty */
		if (server < NR_CPUS)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	/* Only distribute globally while every present cpu is online */
	if (cpus_equal(cpu_online_map, cpu_present_map))
		return default_distrib_server;

	return default_server;
}
#else
/* UP: there is only ever one server to deliver to */
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	return default_server;
}
#endif

201
/*
 * irq_chip unmask hook: point the source at a server with
 * DEFAULT_PRIORITY, then enable it via ibm,int-on.
 */
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	int server;

	pr_devel("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_devel(" -> map to hwirq 0x%x\n", irq);
	/* The IPI and spurious vectors are never managed through RTAS */
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq, 0);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR
			"%s: ibm_set_xive irq %u server %x returned %d\n",
			__func__, irq, server, call_status);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
			__func__, irq, call_status);
		return;
	}
}

234 235
static unsigned int xics_startup(unsigned int virq)
{
236 237 238 239 240 241 242 243
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_to_desc(virq)->msi_desc)
		unmask_msi_irq(virq);

244 245 246 247 248
	/* unmask it */
	xics_unmask_irq(virq);
	return 0;
}

249
/*
 * Mask a source by hardware number: ibm,int-off it, then set its XIVE
 * priority to 0xff so the slot can be removed.
 */
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;

	/* The IPI must remain enabled at all times */
	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
			__func__, irq, call_status);
		return;
	}

	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
				default_server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
			__func__, irq, call_status);
		return;
	}
}

273
/* irq_chip mask hook: translate virq to its hw number and mask that */
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_devel("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}

285
/* A vector fired that we have no Linux mapping for: complain and mask it */
static void xics_mask_unknown_vec(unsigned int vec)
{
	printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
	xics_mask_real_irq(vec);
}

291
/*
 * Extract the interrupt vector from an XIRR value.  The top byte of
 * the XIRR carries the old CPPR (restored on EOI); the remaining
 * 24 bits are the vector itself.
 */
static inline unsigned int xics_xirr_vector(unsigned int xirr)
{
	const unsigned int vector_mask = (1u << 24) - 1;

	return xirr & vector_mask;
}

300 301 302 303 304 305 306 307 308 309 310 311 312
/*
 * Push onto the per-cpu stack the priority we are about to run at for
 * this vector, so the matching EOI can restore the previous CPPR.
 */
static void push_cppr(unsigned int vec)
{
	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);

	/* Depth is bounded by the number of distinct priorities */
	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
		return;

	if (vec == XICS_IPI)
		os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
	else
		os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
}

313 314 315 316 317
/* ppc_md.get_irq hook: native (direct MMIO) flavour */
static unsigned int xics_get_irq_direct(void)
{
	unsigned int xirr = direct_xirr_info_get();
	unsigned int vec = xics_xirr_vector(xirr);
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_radix_revmap_lookup(xics_host, vec);
	if (likely(irq != NO_IRQ)) {
		push_cppr(vec);
		return irq;
	}

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	direct_xirr_info_set(xirr);
	return NO_IRQ;
}

O
Olaf Hering 已提交
336
/* ppc_md.get_irq hook: LPAR (hypervisor) flavour of the above */
static unsigned int xics_get_irq_lpar(void)
{
	unsigned int xirr = lpar_xirr_info_get();
	unsigned int vec = xics_xirr_vector(xirr);
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_radix_revmap_lookup(xics_host, vec);
	if (likely(irq != NO_IRQ)) {
		push_cppr(vec);
		return irq;
	}

	/* We don't have a linux mapping, so have RTAS mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	lpar_xirr_info_set(xirr);
	return NO_IRQ;
}

359 360 361 362 363 364 365 366 367 368
/*
 * Pop the priority saved at interrupt entry.  Underflow means an
 * unbalanced push/pop; fall back to the least favored priority.
 */
static unsigned char pop_cppr(void)
{
	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);

	if (WARN_ON(os_cppr->index < 1))
		return LOWEST_PRIORITY;

	return os_cppr->stack[--os_cppr->index];
}

369
/*
 * irq_chip EOI hooks: write back the vector with the pre-interrupt
 * CPPR in the top byte, restoring the previous priority.
 */
static void xics_eoi_direct(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	/* Order the handler's MMIO before the EOI write */
	iosync();
	direct_xirr_info_set((pop_cppr() << 24) | irq);
}

static void xics_eoi_lpar(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set((pop_cppr() << 24) | irq);
}

385
/*
 * irq_chip set_affinity hook: re-target the source at a server derived
 * from the new mask while preserving its current priority.
 * Returns 0 on success, -1 on any failure.
 */
static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];	/* [0] = server, [1] = priority */
	int irq_server;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return -1;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
			__func__, irq, status);
		return -1;
	}

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	irq_server = get_irq_server(virq, 1);
	if (irq_server == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		printk(KERN_WARNING
			"%s: No online cpus in the mask %s for irq %d\n",
			__func__, cpulist, virq);
		return -1;
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
				irq, irq_server, xics_status[1]);

	if (status) {
		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
			__func__, irq, status);
		return -1;
	}

	return 0;
}

/* The two chip flavours differ only in their EOI method */
static struct irq_chip xics_pic_direct = {
	.name = " XICS     ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.name = " XICS     ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};

448 449 450

/* Interface to arch irq controller subsystem layer */

/* Points to the irq_chip we're actually using (direct or lpar) */
static struct irq_chip *xics_irq_chip;

static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc... The trick we use here is to match
	 * everything here except the legacy 8259 which is compatible "chrp,iic"
	 */
	return !of_device_is_compatible(node, "chrp,iic");
}
L
Linus Torvalds 已提交
462

463 464
/* Wire up a new virq: revmap entry, level flag, chip and fasteoi flow */
static int xics_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);

	/* Insert the interrupt mapping into the radix tree for fast lookup */
	irq_radix_revmap_insert(xics_host, virq, hw);

	irq_to_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
	return 0;
}

static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
477
			   const u32 *intspec, unsigned int intsize,
478 479 480 481 482 483
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	/* Current xics implementation translates everything
	 * to level. It is not technically right for MSIs but this
	 * is irrelevant at this point. We might get smarter in the future
484
	 */
485 486 487 488 489 490
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

491
static struct irq_host_ops xics_host_ops = {
	.match = xics_host_match,
	.map = xics_host_map,
	.xlate = xics_host_xlate,
};

/* Pick the chip flavour for this platform and register the irq host */
static void __init xics_init_host(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		xics_irq_chip = &xics_pic_lpar;
	else
		xics_irq_chip = &xics_pic_direct;

	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
L
Linus Torvalds 已提交
509

510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552

/* Inter-processor interrupt support */

#ifdef CONFIG_SMP
/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct {
	unsigned long value;
} ____cacheline_aligned;

static struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* Post the message bit, then raise the target cpu's IPI via its qirr */
static inline void smp_xics_do_message(int cpu, int msg)
{
	set_bit(msg, &xics_ipi_message[cpu].value);
	mb();	/* message must be visible before the interrupt fires */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

void smp_xics_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {
		smp_xics_do_message(target, msg);
	} else {
		/* broadcast targets: fan out to every online cpu */
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_xics_do_message(i, msg);
		}
	}
}

/*
 * Drain and handle every message bit posted for this cpu; keep looping
 * in case new bits are set while we are processing.
 */
static irqreturn_t xics_ipi_dispatch(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	mb();	/* order mmio clearing qirr */
	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			smp_message_recv(PPC_MSG_CALL_FUNCTION);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			smp_message_recv(PPC_MSG_RESCHEDULE);
		}
		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
				       &xics_ipi_message[cpu].value)) {
			smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
		}
#endif
	}
	return IRQ_HANDLED;
}

/* Native flavour: ack the IPI by clearing our own qirr, then dispatch */
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

/* LPAR flavour: same, but the qirr is written via the hypervisor */
static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

/* Map the single XICS IPI source and install its handler */
static void xics_request_ipi(void)
{
	unsigned int ipi;
	int rc;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = request_irq(ipi, xics_ipi_action_lpar,
				IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
	else
		rc = request_irq(ipi, xics_ipi_action_direct,
				IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
	BUG_ON(rc);
}

int __init smp_xics_probe(void)
{
	xics_request_ipi();

	return cpus_weight(cpu_possible_map);
}

#endif /* CONFIG_SMP */


/* Initialization */

/*
 * Derive default_server and default_distrib_server from the boot cpu's
 * "ibm,ppc-interrupt-gserver#s" property (pairs of server/gserver).
 */
static void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	u32 ilen;
	const u32 *ireg;
	u32 hcpuid;

	/* Find the server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		of_node_put(np);
		return;
	}

	i = ilen / sizeof(int);
	hcpuid = get_hard_smp_processor_id(boot_cpuid);

	/* Global interrupt distribution server is specified in the last
	 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
	 * entry from this property for current boot cpu id and use it as
	 * default distribution server
	 */
	for (j = 0; j < i; j += 2) {
		if (ireg[j] == hcpuid) {
			default_server = hcpuid;
			default_distrib_server = ireg[j+1];
		}
	}

	of_node_put(np);
}

665 666
/* ioremap the presentation register window for the cpu with this hard id */
static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				     unsigned long size)
{
	int i;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
}
L
Linus Torvalds 已提交
682

683 684 685 686
/*
 * Parse one interrupt-presentation node: pick up the starting server
 * index, then map the per-cpu register windows listed in "reg"
 * (each entry is a 64-bit address + 64-bit size, as four u32 cells).
 */
static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	unsigned int ilen;
	const u32 *ireg;

	/* This code does the theorically broken assumption that the interrupt
	 * server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Do that ever happen ? we'll know soon enough... but even good'old
	 * f80 does have that property ..
	 */
	WARN_ON(ireg == NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		*indx = *ireg;
	}
	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		/* XXX Use proper OF parsing code here !!! */
		addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		addr |= *ireg++;
		ilen -= sizeof(u32);
		size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		size |= *ireg++;
		ilen -= sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}

/*
 * Main XICS bring-up: fetch RTAS tokens, map presentation registers
 * (native only), determine server number width, create the irq host
 * and install the get_irq hook.
 */
void __init xics_init_IRQ(void)
{
	struct device_node *np;
	u32 indx = 0;
	int found = 0;
	const u32 *isize;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		/* Under LPAR we use hcalls, no need to map the registers */
		if (firmware_has_feature(FW_FEATURE_LPAR)) {
			of_node_put(np);
			break;
		}
		xics_init_one_node(np, &indx);
	}
	/* No presentation node at all: this machine has no XICS */
	if (found == 0)
		return;

	/* get the bit size of server numbers */
	found = 0;

	for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
		isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);

		if (!isize)
			continue;

		if (!found) {
			interrupt_server_size = *isize;
			found = 1;
		} else if (*isize != interrupt_server_size) {
			printk(KERN_WARNING "XICS: "
			       "mismatched ibm,interrupt-server#-size\n");
			interrupt_server_size = max(*isize,
						    interrupt_server_size);
		}
	}

	xics_update_irq_servers();
	xics_init_host();

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	ppc64_boot_msg(0x21, "XICS Done");
}
784

785
/* Cpu startup, shutdown, and hotplug */

/*
 * Set this cpu's current processor priority register, seeding the
 * bottom of the per-cpu CPPR stack.  Must be called with the stack
 * empty (index 0).
 */
static void xics_set_cpu_priority(unsigned char cppr)
{
	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);

	BUG_ON(os_cppr->index != 0);

	os_cppr->stack[os_cppr->index] = cppr;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cppr);
	else
		direct_cppr_info(cppr);
	iosync();
}
801

802 803 804
/* Have the calling processor join or leave the specified global queue */
static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
	int index;
	int status;

	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
		return;

	/* Indicator indices count down from the top of the server space */
	index = (1UL << interrupt_server_size) - 1 - gserver;

	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);

	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
}
818 819

/* Per-cpu bring-up: accept all interrupts and join the global queue */
void xics_setup_cpu(void)
{
	xics_set_cpu_priority(LOWEST_PRIORITY);

	xics_set_cpu_giq(default_distrib_server, 1);
}

A
Al Viro 已提交
826
/* Quiesce this cpu: reject all interrupts and clear any pending IPI */
void xics_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(0);

	/* Clear any pending IPI request */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, 0xff);
	else
		direct_qirr_info(cpu, 0xff);
}

/* Teardown for kexec: additionally EOI the IPI and leave the GIQ */
void xics_kexec_teardown_cpu(int secondary)
{
	xics_teardown_cpu();

	/*
	 * we take the ipi irq but and never return so we
	 * need to EOI the IPI, but want to leave our priority 0
	 *
	 * should we check all the other interrupts too?
	 * should we be flagging idle loop instead?
	 * or creating some task to be scheduled?
	 */

	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_xirr_info_set((0x00 << 24) | XICS_IPI);
	else
		direct_xirr_info_set((0x00 << 24) | XICS_IPI);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		xics_set_cpu_giq(default_distrib_server, 0);
}

L
Linus Torvalds 已提交
865 866 867 868 869
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
870 871
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;
L
Linus Torvalds 已提交
872

873 874 875 876
	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == default_server)
		xics_update_irq_servers();

L
Linus Torvalds 已提交
877
	/* Reject any interrupt that was queued to us... */
878
	xics_set_cpu_priority(0);
L
Linus Torvalds 已提交
879

880 881
	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(default_distrib_server, 0);
L
Linus Torvalds 已提交
882 883

	/* Allow IPIs again... */
884
	xics_set_cpu_priority(DEFAULT_PRIORITY);
L
Linus Torvalds 已提交
885 886

	for_each_irq(virq) {
887
		struct irq_desc *desc;
L
Linus Torvalds 已提交
888
		int xics_status[2];
889
		int status;
L
Linus Torvalds 已提交
890 891 892
		unsigned long flags;

		/* We cant set affinity on ISA interrupts */
893
		if (virq < NUM_ISA_INTERRUPTS)
L
Linus Torvalds 已提交
894
			continue;
895 896 897
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
L
Linus Torvalds 已提交
898
		/* We need to get IPIs still. */
899
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
L
Linus Torvalds 已提交
900
			continue;
M
Michael Ellerman 已提交
901
		desc = irq_to_desc(virq);
L
Linus Torvalds 已提交
902 903

		/* We only need to migrate enabled IRQS */
904
		if (desc == NULL || desc->chip == NULL
L
Linus Torvalds 已提交
905
		    || desc->action == NULL
906
		    || desc->chip->set_affinity == NULL)
L
Linus Torvalds 已提交
907 908 909 910 911 912
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
913 914
			printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
					__func__, irq, status);
L
Linus Torvalds 已提交
915 916 917 918 919 920 921 922
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
923
		if (xics_status[0] != hw_cpu)
L
Linus Torvalds 已提交
924 925
			goto unlock;

A
Anton Blanchard 已提交
926
		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
L
Linus Torvalds 已提交
927 928 929
		       virq, cpu);

		/* Reset affinity to all cpus */
M
Michael Ellerman 已提交
930
		cpumask_setall(irq_to_desc(virq)->affinity);
931
		desc->chip->set_affinity(virq, cpu_all_mask);
L
Linus Torvalds 已提交
932 933 934 935 936
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif