/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

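/*
 * for_each_irq_pin() walks the singly linked irq_pin_list hanging off an
 * irq_cfg.  Illustrative use (sketch only), in the style of several helpers
 * further down in this file:
 *
 *	struct irq_pin_list *entry;
 *
 *	for_each_irq_pin(entry, cfg->irq_2_pin)
 *		io_apic_modify(entry->apic, 0x10 + entry->pin * 2, reg);
 */
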
/*
 *      Is the SiS APIC rmw bug present ?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* IO APIC gsi routing info */
struct mp_ioapic_gsi  mp_gsi_routing[MAX_IO_APICS];

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		set_irq_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return get_irq_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_ATOMIC, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	set_irq_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = get_irq_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		set_irq_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

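/*
 * Note: alloc_irq_and_cfg_at() either returns the irq_cfg already attached
 * to an existing descriptor (irq_alloc_desc_at() failing with -EEXIST) or
 * allocates both the descriptor and a fresh irq_cfg; free_irq_at() is the
 * matching teardown helper.
 */
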
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

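/*
 * Register access note (summary of the helpers above): the IO-APIC exposes
 * an indirect window - the register number is written to 'index' and the
 * value is then read from or written to 'data'.  Redirection table entry
 * 'pin' occupies two 32-bit registers, 0x10 + 2*pin (low word) and
 * 0x11 + 2*pin (high word), which is the layout the entry helpers below
 * rely on.
 */
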
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

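/*
 * Illustrative read-modify-write of a single RTE using the helpers above
 * (sketch only):
 *
 *	struct IO_APIC_route_entry e = ioapic_read_entry(apic, pin);
 *	e.mask = 1;
 *	ioapic_write_entry(apic, pin, e);
 *
 * Each helper takes ioapic_lock internally, so the individual accesses are
 * serialized against other CPUs, but the read-modify-write sequence as a
 * whole is not atomic.
 */
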
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
				node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

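/*
 * Note: mask_ioapic_irq()/unmask_ioapic_irq() operate on data->chip_data,
 * i.e. the struct irq_cfg installed by arch_early_irq_init() and
 * alloc_irq_and_cfg_at().  They are shaped as struct irq_chip mask/unmask
 * callbacks for the IO-APIC chip declared further down in this file
 * (inferred from their signatures and naming, not restated from the chip
 * definition here).
 */
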
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
Y
Yinghai Lu 已提交
561 562 563
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};
L
Linus Torvalds 已提交
564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
590 591
#endif /* CONFIG_X86_32 */

592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
	int apic;
	struct IO_APIC_route_entry **ioapic_entries;

	ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
				GFP_ATOMIC);
	if (!ioapic_entries)
		return NULL;

	for (apic = 0; apic < nr_ioapics; apic++) {
		ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_ATOMIC);
		if (!ioapic_entries[apic])
			goto nomem;
	}

	return ioapic_entries;

nomem:
	while (--apic >= 0)
		kfree(ioapic_entries[apic]);
	kfree(ioapic_entries);

	return NULL;
}
619 620

/*
621
 * Saves all the IO-APIC RTE's
622
 */
623
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
624 625 626
{
	int apic, pin;

627 628
	if (!ioapic_entries)
		return -ENOMEM;
629 630

	for (apic = 0; apic < nr_ioapics; apic++) {
631 632
		if (!ioapic_entries[apic])
			return -ENOMEM;
633

634
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
635
			ioapic_entries[apic][pin] =
636
				ioapic_read_entry(apic, pin);
637
	}
638

639 640 641
	return 0;
}

642 643 644 645
/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
646 647 648
{
	int apic, pin;

649 650 651
	if (!ioapic_entries)
		return;

652
	for (apic = 0; apic < nr_ioapics; apic++) {
653
		if (!ioapic_entries[apic])
654
			break;
655

656 657 658
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

659
			entry = ioapic_entries[apic][pin];
660 661 662 663 664 665 666 667
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

668 669 670 671
/*
 * Restore IO APIC entries which was saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
672 673 674
{
	int apic, pin;

675 676 677
	if (!ioapic_entries)
		return -ENOMEM;

678
	for (apic = 0; apic < nr_ioapics; apic++) {
679 680 681
		if (!ioapic_entries[apic])
			return -ENOMEM;

682 683
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
684
					ioapic_entries[apic][pin]);
685
	}
686
	return 0;
687 688
}

689 690 691 692 693 694 695 696
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic;

	for (apic = 0; apic < nr_ioapics; apic++)
		kfree(ioapic_entries[apic]);

	kfree(ioapic_entries);
697
}
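
/*
 * Typical usage of the five helpers above (a bracketing pattern around code
 * that reprograms the IO-APICs, e.g. when enabling interrupt remapping or
 * across suspend/resume; the callers live outside this file, so this is the
 * intended pattern rather than something enforced here):
 *
 *	struct IO_APIC_route_entry **entries = alloc_ioapic_entries();
 *
 *	if (entries) {
 *		if (!save_IO_APIC_setup(entries)) {
 *			mask_IO_APIC_setup(entries);
 *			... reprogram the IO-APICs ...
 *			restore_IO_APIC_setup(entries);
 *		}
 *		free_ioapic_entries(entries);
 *	}
 */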
L
Linus Torvalds 已提交
698 699 700 701 702 703 704 705 706

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
707 708 709 710
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
L
Linus Torvalds 已提交
711 712 713 714 715 716 717 718
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
719
static int __init find_isa_irq_pin(int irq, int type)
L
Linus Torvalds 已提交
720 721 722 723
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
724
		int lbus = mp_irqs[i].srcbus;
L
Linus Torvalds 已提交
725

A
Alexey Starikovskiy 已提交
726
		if (test_bit(lbus, mp_bus_not_pci) &&
727 728
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
L
Linus Torvalds 已提交
729

730
			return mp_irqs[i].dstirq;
L
Linus Torvalds 已提交
731 732 733 734
	}
	return -1;
}

735 736 737 738 739
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
740
		int lbus = mp_irqs[i].srcbus;
741

A
Alexey Starikovskiy 已提交
742
		if (test_bit(lbus, mp_bus_not_pci) &&
743 744
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
745 746 747 748
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
749
		for(apic = 0; apic < nr_ioapics; apic++) {
750
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
751 752 753 754 755 756 757
				return apic;
		}
	}

	return -1;
}

758
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
L
Linus Torvalds 已提交
759 760 761 762 763
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
764
	if (irq < legacy_pic->nr_legacy_irqs) {
L
Linus Torvalds 已提交
765 766 767 768 769 770 771
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
772

773
#endif
L
Linus Torvalds 已提交
774

A
Alexey Starikovskiy 已提交
775 776 777 778 779 780
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

L
Linus Torvalds 已提交
781 782 783 784 785
/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

786
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
A
Alexey Starikovskiy 已提交
787
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
L
Linus Torvalds 已提交
788 789 790 791 792 793 794 795 796 797 798

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
A
Alexey Starikovskiy 已提交
799
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
L
Linus Torvalds 已提交
800

801
static int MPBIOS_polarity(int idx)
L
Linus Torvalds 已提交
802
{
803
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
804 805 806 807 808
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
809
	switch (mp_irqs[idx].irqflag & 3)
810
	{
811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
L
Linus Torvalds 已提交
839 840 841 842 843 844
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
845
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
846 847 848 849 850
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
851
	switch ((mp_irqs[idx].irqflag>>2) & 3)
L
Linus Torvalds 已提交
852
	{
853 854 855 856 857
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
858
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
L
Linus Torvalds 已提交
888
			break;
889
		case 1: /* edge */
L
Linus Torvalds 已提交
890
		{
891
			trigger = 0;
L
Linus Torvalds 已提交
892 893
			break;
		}
894
		case 2: /* reserved */
L
Linus Torvalds 已提交
895
		{
896 897
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
L
Linus Torvalds 已提交
898 899
			break;
		}
900
		case 3: /* level */
L
Linus Torvalds 已提交
901
		{
902
			trigger = 1;
L
Linus Torvalds 已提交
903 904
			break;
		}
905
		default: /* invalid */
L
Linus Torvalds 已提交
906 907
		{
			printk(KERN_WARNING "broken BIOS!!\n");
908
			trigger = 0;
L
Linus Torvalds 已提交
909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
927
	int irq;
928
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
929 930 931 932

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
933
	if (mp_irqs[idx].dstirq != pin)
L
Linus Torvalds 已提交
934 935
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

936
	if (test_bit(bus, mp_bus_not_pci)) {
937
		irq = mp_irqs[idx].srcbusirq;
938
	} else {
939
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;
940 941 942 943

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
944
			irq = gsi_top + gsi;
L
Linus Torvalds 已提交
945 946
	}

947
#ifdef CONFIG_X86_32
L
Linus Torvalds 已提交
948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
964 965
#endif

L
Linus Torvalds 已提交
966 967 968
	return irq;
}

969 970 971 972 973
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
974
				struct io_apic_irq_attr *irq_attr)
975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
1004 1005 1006 1007
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
1008 1009 1010 1011 1012 1013 1014
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
1015 1016 1017 1018
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
1019 1020 1021 1022 1023 1024 1025 1026
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
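
/*
 * Usage sketch for IO_APIC_get_PCI_irq_vector() (illustrative only; the
 * bus/slot/pin triple is assumed to come from the device's PCI config
 * space):
 *
 *	struct io_apic_irq_attr attr;
 *	int irq = IO_APIC_get_PCI_irq_vector(bus, slot, pin, &attr);
 *
 *	if (irq >= 0)
 *		... set up the irq using the ioapic/pin/trigger/polarity
 *		    recorded in attr ...
 */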

1027 1028 1029 1030 1031
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
1032
	raw_spin_lock(&vector_lock);
1033
}
L
Linus Torvalds 已提交
1034

1035
void unlock_vector_lock(void)
L
Linus Torvalds 已提交
1036
{
1037
	raw_spin_unlock(&vector_lock);
1038
}
L
Linus Torvalds 已提交
1039

1040 1041
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1042
{
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
1054
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1055
	static int current_offset = VECTOR_OFFSET_START % 8;
1056
	unsigned int old_vector;
1057 1058
	int cpu, err;
	cpumask_var_t tmp_mask;
1059

1060
	if (cfg->move_in_progress)
1061
		return -EBUSY;
1062

1063 1064
	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;
1065

1066 1067
	old_vector = cfg->vector;
	if (old_vector) {
1068 1069 1070 1071
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
1072
			return 0;
1073
		}
1074
	}
1075

1076
	/* Only try and allocate irqs on cpus that are present */
1077 1078
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
1079 1080
		int new_cpu;
		int vector, offset;
1081

1082
		apic->vector_allocation_domain(cpu, tmp_mask);
1083

1084 1085
		vector = current_vector;
		offset = current_offset;
1086
next:
1087 1088
		vector += 8;
		if (vector >= first_system_vector) {
1089
			/* If out of vectors on large boxen, must share them. */
1090
			offset = (offset + 1) % 8;
1091
			vector = FIRST_EXTERNAL_VECTOR + offset;
1092 1093 1094
		}
		if (unlikely(current_vector == vector))
			continue;
1095 1096

		if (test_bit(vector, used_vectors))
1097
			goto next;
1098

1099
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1100 1101 1102 1103 1104 1105 1106
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
1107
			cpumask_copy(cfg->old_domain, cfg->domain);
1108
		}
1109
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1110 1111
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
1112 1113 1114
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
1115
	}
1116 1117
	free_cpumask_var(tmp_mask);
	return err;
1118 1119
}

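/*
 * Note on the allocator above: candidate vectors advance in steps of 8
 * starting at FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START, so successive
 * allocations land in different interrupt priority levels (the level is
 * vector >> 4).  When first_system_vector is reached the offset is bumped
 * (mod 8) and the search wraps, which is what eventually forces sharing on
 * very large systems.  (Concrete vector numbers depend on constants defined
 * elsewhere, e.g. irq_vectors.h, and are not restated here.)
 */
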
1120
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1121 1122
{
	int err;
1123 1124
	unsigned long flags;

1125
	raw_spin_lock_irqsave(&vector_lock, flags);
Y
Yinghai Lu 已提交
1126
	err = __assign_irq_vector(irq, cfg, mask);
1127
	raw_spin_unlock_irqrestore(&vector_lock, flags);
1128 1129 1130
	return err;
}

Y
Yinghai Lu 已提交
1131
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1132 1133 1134 1135 1136 1137
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
1138
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1139 1140 1141
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
1142
	cpumask_clear(cfg->domain);
1143 1144 1145

	if (likely(!cfg->move_in_progress))
		return;
1146
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1147 1148 1149 1150 1151 1152 1153 1154 1155
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
1156 1157 1158 1159 1160 1161 1162 1163
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

1164 1165 1166 1167 1168
	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
1169
	raw_spin_lock(&vector_lock);
1170
	/* Mark the inuse vectors */
T
Thomas Gleixner 已提交
1171 1172 1173 1174
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (!cfg)
			continue;
1175 1176 1177 1178 1179 1180 1181
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

1182
		if (!cpumask_test_cpu(cpu, cfg->domain))
1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
1194
		if (!cpumask_test_cpu(cpu, cfg->domain))
1195
			per_cpu(vector_irq, cpu)[vector] = -1;
1196
	}
1197
	raw_spin_unlock(&vector_lock);
L
Linus Torvalds 已提交
1198
}
1199

1200
static struct irq_chip ioapic_chip;
1201
static struct irq_chip ir_ioapic_chip;
L
Linus Torvalds 已提交
1202

1203 1204 1205
#define IOAPIC_AUTO     -1
#define IOAPIC_EDGE     0
#define IOAPIC_LEVEL    1
L
Linus Torvalds 已提交
1206

1207
#ifdef CONFIG_X86_32
1208 1209
static inline int IO_APIC_irq_trigger(int irq)
{
T
Thomas Gleixner 已提交
1210
	int apic, idx, pin;
1211

T
Thomas Gleixner 已提交
1212 1213 1214 1215 1216 1217 1218 1219
	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
1220 1221
	 * nonexistent IRQs are edge default
	 */
T
Thomas Gleixner 已提交
1222
	return 0;
1223
}
1224 1225 1226
#else
static inline int IO_APIC_irq_trigger(int irq)
{
1227
	return 1;
1228 1229
}
#endif
1230

1231
static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
L
Linus Torvalds 已提交
1232
{
Y
Yinghai Lu 已提交
1233

1234
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1235
	    trigger == IOAPIC_LEVEL)
1236
		irq_set_status_flags(irq, IRQ_LEVEL);
1237
	else
1238
		irq_clear_status_flags(irq, IRQ_LEVEL);
1239

1240
	if (irq_remapped(get_irq_chip_data(irq))) {
1241
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
1242 1243 1244 1245 1246 1247 1248 1249 1250
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						     "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
1251

1252 1253
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
1254
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
1255 1256
					      handle_fasteoi_irq,
					      "fasteoi");
1257
	else
1258
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
1259
					      handle_edge_irq, "edge");
L
Linus Torvalds 已提交
1260 1261
}

1262 1263 1264 1265
static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
L
Linus Torvalds 已提交
1266
{
1267 1268 1269 1270 1271
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

1272
	if (intr_remapping_enabled) {
I
Ingo Molnar 已提交
1273
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1274 1275 1276 1277 1278 1279
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
I
Ingo Molnar 已提交
1280
			panic("No mapping iommu for ioapic %d\n", apic_id);
1281 1282 1283

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
I
Ingo Molnar 已提交
1284
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1285

1286
		prepare_irte(&irte, vector, destination);
1287

1288 1289 1290
		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

1291 1292 1293 1294 1295 1296
		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
1297 1298 1299 1300 1301
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
1302
	} else {
1303 1304
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
1305
		entry->dest = destination;
1306
		entry->vector = vector;
1307
	}
1308

1309
	entry->mask = 0;				/* enable IRQ */
1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

1321 1322
static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
			     struct irq_cfg *cfg, int trigger, int polarity)
1323
{
L
Linus Torvalds 已提交
1324
	struct IO_APIC_route_entry entry;
1325
	unsigned int dest;
1326 1327 1328

	if (!IO_APIC_IRQ(irq))
		return;
1329 1330 1331 1332 1333
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
1334
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1335 1336
		apic->vector_allocation_domain(0, cfg->domain);

1337
	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1338 1339
		return;

1340
	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1341 1342 1343 1344

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
I
Ingo Molnar 已提交
1345
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1346 1347 1348
		    irq, trigger, polarity);


I
Ingo Molnar 已提交
1349
	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1350
			       dest, trigger, polarity, cfg->vector, pin)) {
1351
		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
I
Ingo Molnar 已提交
1352
		       mp_ioapics[apic_id].apicid, pin);
Y
Yinghai Lu 已提交
1353
		__clear_irq_vector(irq, cfg);
1354 1355 1356
		return;
	}

1357
	ioapic_register_intr(irq, trigger);
1358
	if (irq < legacy_pic->nr_legacy_irqs)
1359
		legacy_pic->mask(irq);
1360

I
Ingo Molnar 已提交
1361
	ioapic_write_entry(apic_id, pin, entry);
1362 1363
}

1364 1365 1366 1367
static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

1368 1369
static void __init setup_IO_APIC_irqs(void)
{
1370
	int apic_id, pin, idx, irq, notcon = 0;
1371
	int node = cpu_to_node(0);
1372
	struct irq_cfg *cfg;
L
Linus Torvalds 已提交
1373 1374 1375

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

E
Eric W. Biederman 已提交
1376
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}
1395

1396
		irq = pin_2_irq(idx, apic_id, pin);
1397

E
Eric W. Biederman 已提交
1398 1399 1400
		if ((apic_id > 0) && (irq > 16))
			continue;

1401 1402 1403 1404 1405 1406 1407
		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
				apic->multi_timer_check(apic_id, irq))
			continue;
1408

1409 1410
		cfg = alloc_irq_and_cfg_at(irq, node);
		if (!cfg)
1411
			continue;
1412

1413
		add_pin_to_irq_node(cfg, node, apic_id, pin);
1414 1415 1416 1417
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
1418 1419
		setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
				  irq_polarity(idx));
L
Linus Torvalds 已提交
1420 1421
	}

1422 1423
	if (notcon)
		apic_printk(APIC_VERBOSE,
1424
			" (apicid-pin) not connected\n");
L
Linus Torvalds 已提交
1425 1426
}

Y
Yinghai Lu 已提交
1427 1428 1429 1430 1431 1432 1433
/*
 * for the gsit that is not in first ioapic
 * but could not use acpi_register_gsi()
 * like some special sci in IBM x3330
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
1434
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
Y
Yinghai Lu 已提交
1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449
	struct irq_cfg *cfg;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);
1450 1451 1452

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
Y
Yinghai Lu 已提交
1453
		return;
1454

1455 1456
	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
Y
Yinghai Lu 已提交
1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467
		return;

	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

1468
	setup_ioapic_irq(apic_id, pin, irq, cfg,
Y
Yinghai Lu 已提交
1469 1470 1471
			irq_trigger(idx), irq_polarity(idx));
}

L
Linus Torvalds 已提交
1472
/*
1473
 * Set up the timer pin, possibly with the 8259A-master behind.
L
Linus Torvalds 已提交
1474
 */
I
Ingo Molnar 已提交
1475
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1476
					int vector)
L
Linus Torvalds 已提交
1477 1478 1479
{
	struct IO_APIC_route_entry entry;

1480 1481 1482
	if (intr_remapping_enabled)
		return;

1483
	memset(&entry, 0, sizeof(entry));
L
Linus Torvalds 已提交
1484 1485 1486 1487 1488

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
1489
	entry.dest_mode = apic->irq_dest_mode;
Y
Yinghai Lu 已提交
1490
	entry.mask = 0;			/* don't mask IRQ for edge */
1491
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1492
	entry.delivery_mode = apic->irq_delivery_mode;
L
Linus Torvalds 已提交
1493 1494 1495 1496 1497 1498
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
1499
	 * scene we may have a 8259A-master in AEOI mode ...
L
Linus Torvalds 已提交
1500
	 */
1501
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
L
Linus Torvalds 已提交
1502 1503 1504 1505

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
I
Ingo Molnar 已提交
1506
	ioapic_write_entry(apic_id, pin, entry);
L
Linus Torvalds 已提交
1507 1508
}

1509 1510

__apicdebuginit(void) print_IO_APIC(void)
L
Linus Torvalds 已提交
1511 1512 1513 1514 1515 1516 1517
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
1518
	struct irq_cfg *cfg;
1519
	unsigned int irq;
L
Linus Torvalds 已提交
1520

1521
	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
L
Linus Torvalds 已提交
1522 1523
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1524
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);
L
Linus Torvalds 已提交
1525 1526 1527 1528 1529 1530 1531 1532 1533

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

1534
	raw_spin_lock_irqsave(&ioapic_lock, flags);
L
Linus Torvalds 已提交
1535 1536 1537 1538
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
T
Thomas Gleixner 已提交
1539 1540
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
1541
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
L
Linus Torvalds 已提交
1542

1543
	printk("\n");
1544
	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
L
Linus Torvalds 已提交
1545 1546 1547 1548 1549
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

1550
	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
L
Linus Torvalds 已提交
1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

1579
	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1580
			  " Stat Dmod Deli Vect:\n");
L
Linus Torvalds 已提交
1581 1582 1583 1584

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

1585
		entry = ioapic_read_entry(apic, i);
L
Linus Torvalds 已提交
1586

1587 1588 1589 1590
		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);
L
Linus Torvalds 已提交
1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
T
Thomas Gleixner 已提交
1605
	for_each_active_irq(irq) {
1606 1607
		struct irq_pin_list *entry;

T
Thomas Gleixner 已提交
1608
		cfg = get_irq_chip_data(irq);
1609 1610
		if (!cfg)
			continue;
1611
		entry = cfg->irq_2_pin;
1612
		if (!entry)
L
Linus Torvalds 已提交
1613
			continue;
1614
		printk(KERN_DEBUG "IRQ%d ", irq);
1615
		for_each_irq_pin(entry, cfg->irq_2_pin)
L
Linus Torvalds 已提交
1616 1617 1618 1619 1620 1621 1622 1623 1624
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

1625
__apicdebuginit(void) print_APIC_field(int base)
L
Linus Torvalds 已提交
1626
{
1627
	int i;
L
Linus Torvalds 已提交
1628

1629 1630 1631 1632 1633 1634
	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
L
Linus Torvalds 已提交
1635 1636
}

1637
__apicdebuginit(void) print_local_APIC(void *dummy)
L
Linus Torvalds 已提交
1638
{
1639
	unsigned int i, v, ver, maxlvt;
1640
	u64 icr;
L
Linus Torvalds 已提交
1641

1642
	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
L
Linus Torvalds 已提交
1643
		smp_processor_id(), hard_smp_processor_id());
1644
	v = apic_read(APIC_ID);
1645
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
L
Linus Torvalds 已提交
1646 1647 1648
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
1649
	maxlvt = lapic_get_maxlvt();
L
Linus Torvalds 已提交
1650 1651 1652 1653

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

1654
	if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
1655 1656 1657 1658 1659
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
L
Linus Torvalds 已提交
1660 1661 1662 1663
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

1664 1665 1666 1667 1668 1669 1670 1671 1672
	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

L
Linus Torvalds 已提交
1673 1674
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1675 1676 1677 1678
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
L
Linus Torvalds 已提交
1679 1680 1681 1682
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
1683
	print_APIC_field(APIC_ISR);
L
Linus Torvalds 已提交
1684
	printk(KERN_DEBUG "... APIC TMR field:\n");
1685
	print_APIC_field(APIC_TMR);
L
Linus Torvalds 已提交
1686
	printk(KERN_DEBUG "... APIC IRR field:\n");
1687
	print_APIC_field(APIC_IRR);
L
Linus Torvalds 已提交
1688

1689 1690
	if (APIC_INTEGRATED(ver)) {             /* !82489DX */
		if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
L
Linus Torvalds 已提交
1691
			apic_write(APIC_ESR, 0);
1692

L
Linus Torvalds 已提交
1693 1694 1695 1696
		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

1697
	icr = apic_icr_read();
1698 1699
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
L
Linus Torvalds 已提交
1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {                       /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
L
Linus Torvalds 已提交
1736 1737 1738
	printk("\n");
}

1739
__apicdebuginit(void) print_local_APICs(int maxcpu)
L
Linus Torvalds 已提交
1740
{
1741 1742
	int cpu;

1743 1744 1745
	if (!maxcpu)
		return;

1746
	preempt_disable();
1747 1748 1749
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
1750
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1751
	}
1752
	preempt_enable();
L
Linus Torvalds 已提交
1753 1754
}

1755
__apicdebuginit(void) print_PIC(void)
L
Linus Torvalds 已提交
1756 1757 1758 1759
{
	unsigned int v;
	unsigned long flags;

1760
	if (!legacy_pic->nr_legacy_irqs)
L
Linus Torvalds 已提交
1761 1762 1763 1764
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

1765
	raw_spin_lock_irqsave(&i8259A_lock, flags);
L
Linus Torvalds 已提交
1766 1767 1768 1769 1770 1771 1772

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

1773 1774
	outb(0x0b,0xa0);
	outb(0x0b,0x20);
L
Linus Torvalds 已提交
1775
	v = inb(0xa0) << 8 | inb(0x20);
1776 1777
	outb(0x0a,0xa0);
	outb(0x0a,0x20);
L
Linus Torvalds 已提交
1778

1779
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
L
Linus Torvalds 已提交
1780 1781 1782 1783 1784 1785 1786

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804
static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
1805
{
1806 1807 1808
	if (apic_verbosity == APIC_QUIET)
		return 0;

1809
	print_PIC();
1810 1811

	/* don't print out if apic is not there */
1812
	if (!cpu_has_apic && !apic_from_smp_config())
1813 1814
		return 0;

1815
	print_local_APICs(show_lapic);
1816 1817 1818 1819 1820
	print_IO_APIC();

	return 0;
}

1821
fs_initcall(print_ICs);
1822

L
Linus Torvalds 已提交
1823

Y
Yinghai Lu 已提交
1824 1825 1826
/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

1827
void __init enable_IO_APIC(void)
L
Linus Torvalds 已提交
1828
{
1829
	int i8259_apic, i8259_pin;
1830
	int apic;
1831

1832
	if (!legacy_pic->nr_legacy_irqs)
1833 1834
		return;

1835
	for(apic = 0; apic < nr_ioapics; apic++) {
1836 1837
		int pin;
		/* See if any of the pins is in ExtINT mode */
1838
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1839
			struct IO_APIC_route_entry entry;
1840
			entry = ioapic_read_entry(apic, pin);
1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little more complex (it needs both the
	 * IOAPIC RTE as well as the interrupt-remapping table entry to be
	 * configured).  As this gets called during crash dump, keep this
	 * simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}


		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(cfg))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}

/*
 * Either sets data->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves data->affinity untouched.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	if (assign_irq_vector(data->irq, data->chip_data, mask))
		return -1;

	cpumask_copy(data->affinity, mask);

	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
	return 0;
}

static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}

#ifdef CONFIG_INTR_REMAP

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 */
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

#else
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	return 0;
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr  & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__get_cpu_var(vector_irq)[vector] = -1;
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_cfg *cfg) { }
#endif

static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	move_native_irq(data->irq);
	ack_APIC_irq();
}

atomic_t irq_mis_count;

/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH Specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is an error in the documentation and these ICH chips
 * use io-apic's of version 0x20.
 *
 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during this.
*/
static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (mp_ioapics[entry->apic].apicver >= 0x20) {
			/*
			 * Intr-remapping uses pin number as the virtual vector
			 * in the RTE. Actual vector is programmed in
			 * intr-remapping table entry. Hence for the io-apic
			 * EOI we use the pin number.
			 */
			if (irq_remapped(cfg))
				io_apic_eoi(entry->apic, entry->pin);
			else
				io_apic_eoi(entry->apic, cfg->vector);
		} else {
			__mask_and_edge_IO_APIC_irq(entry);
			__unmask_and_level_IO_APIC_irq(entry);
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, do_unmask_irq = 0, irq = data->irq;
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long v;

	irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_ioapic(cfg);
	}
#endif

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source.  The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 *
	 * Also in the case when cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case also
	 * level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcasted to the IO-APIC's which will clear the remoteIRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APIC's
	 * supporting EOI register, we do an explicit EOI to clear the
	 * remote IRR and on IO-APIC's which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
2476
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_ioapic(cfg);
	}
}

#ifdef CONFIG_INTR_REMAP
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}
#endif /* CONFIG_INTR_REMAP */

static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ioapic_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name			= "IR-IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
#ifdef CONFIG_INTR_REMAP
	.irq_ack		= ir_ack_apic_edge,
	.irq_eoi		= ir_ack_apic_level,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ir_ioapic_set_affinity,
#endif
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				set_irq_chip(irq, &no_irq_chip);
		}
	}
}

/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}

/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin  = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
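	/* Poll for up to ~1 second; seeing RTC_PF (periodic interrupt fired)
	   shortens the wait by knocking 10 extra counts off i. */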
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}

static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = get_irq_chip_data(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);
#ifdef CONFIG_X86_32
	{
		unsigned int ver;

		ver = apic_read(APIC_LVR);
		ver = GET_APIC_VERSION(ver);
		timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
	}
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/* for edge trigger, setup_ioapic_irq already
			 * leaves it unmasked,
			 * so we only need to unmask if it is level-triggered.
			 * do we really have a level-triggered timer?
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				legacy_pic->unmask(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				legacy_pic->mask(0);
				setup_nmi();
				legacy_pic->unmask(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
		"report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1UL << PIC_CASCADE_IR)

void __init setup_IO_APIC(void)
{

	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
	io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
	x86_init.mpparse.setup_ioapic_ids();

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (legacy_pic->nr_legacy_irqs)
		check_timer();
}

/*
 *      Called after all the initialization is done. If we didn't find any
 *      APIC bugs then we can allow the modify fast path
 */

static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

L
	entry = data->entry;
2984 2985
	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
		*entry = ioapic_read_entry(dev->id, i);
L
	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

L
	entry = data->entry;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device * dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++ ) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int from, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int ret = 0;
	int irq;

	if (from < nr_irqs_gsi)
		from = nr_irqs_gsi;

	irq = alloc_irq_from(from, node);
	if (irq < 0)
		return 0;
	cfg = alloc_irq_cfg(irq, node);
	if (!cfg) {
		free_irq_at(irq, NULL);
		return 0;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
		ret = irq;
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		set_irq_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		free_irq_at(irq, cfg);
	}
	return ret;
}

int create_irq(void)
{
	int node = cpu_to_node(0);
	unsigned int irq_want;
	int irq;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want, node);

	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);
	unsigned long flags;

	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);

	free_irte(irq);
	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	free_irq_at(irq, cfg);
}

/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	if (irq_remapped(get_irq_chip_data(irq))) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		prepare_irte(&irte, cfg->vector, dest);

		/* Set source-id of interrupt request */
		if (pdev)
			set_msi_sid(&irte, pdev);
		else
			set_hpet_sid(&irte, hpet_id);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else {
		if (x2apic_enabled())
			msg->address_hi = MSI_ADDR_BASE_HI |
					  MSI_ADDR_EXT_DEST_ID(dest);
		else
			msg->address_hi = MSI_ADDR_BASE_HI;

		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((apic->irq_dest_mode == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}

#ifdef CONFIG_SMP
static int
msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	__get_cached_msi_msg(data->msi_desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	__write_msi_msg(data->msi_desc, &msg);

	return 0;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static int
ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (get_irte(irq, &irte))
		return -1;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

#endif
#endif /* CONFIG_SMP */

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name			= "PCI-MSI",
	.irq_unmask		= unmask_msi_irq,
	.irq_mask		= mask_msi_irq,
	.irq_ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= msi_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static struct irq_chip msi_ir_chip = {
	.name			= "IR-PCI-MSI",
	.irq_unmask		= unmask_msi_irq,
	.irq_mask		= mask_msi_irq,
#ifdef CONFIG_INTR_REMAP
	.irq_ack		= ir_ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ir_msi_set_affinity,
#endif
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(dev, irq, &msg, -1);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

	if (irq_remapped(get_irq_chip_data(irq))) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}

int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int node, ret, sub_handle, index = 0;
	unsigned int irq, irq_want;
	struct msi_desc *msidesc;
	struct intel_iommu *iommu = NULL;

	/* x86 doesn't support multiple MSI yet */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	node = dev_to_node(&dev->dev);
	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want, node);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct msi_msg msg;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
3417 3418

	return 0;
3419
}
Y
Yinghai Lu 已提交
3420

3421 3422
#endif /* CONFIG_SMP */

3423
static struct irq_chip dmar_msi_type = {
3424 3425 3426 3427
	.name			= "DMAR_MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= ack_apic_edge,
3428
#ifdef CONFIG_SMP
3429
	.irq_set_affinity	= dmar_msi_set_affinity,
3430
#endif
3431
	.irq_retrigger		= ioapic_retrigger_irq,
3432 3433 3434 3435 3436 3437
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;
3438

3439
	ret = msi_compose_msg(NULL, irq, &msg, -1);
3440 3441 3442 3443 3444 3445 3446 3447 3448
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif

#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
static int hpet_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	hpet_msi_read(data->handler_data, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(data->handler_data, &msg);

	return 0;
}

#endif /* CONFIG_SMP */

static struct irq_chip ir_hpet_msi_type = {
	.name			= "IR-HPET_MSI",
	.irq_unmask		= hpet_msi_unmask,
	.irq_mask		= hpet_msi_mask,
#ifdef CONFIG_INTR_REMAP
	.irq_ack		= ir_ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ir_msi_set_affinity,
#endif
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static struct irq_chip hpet_msi_type = {
	.name = "HPET_MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
	.irq_ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = hpet_msi_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};

int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct msi_msg msg;
	int ret;

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_hpet_to_ir(id);
		int index;

		if (!iommu)
			return -1;

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			return -1;
	}

	ret = msi_compose_msg(NULL, irq, &msg, id);
3518 3519 3520
	if (ret < 0)
		return ret;

3521
	hpet_msi_write(get_irq_data(irq), &msg);
3522
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3523
	if (irq_remapped(get_irq_chip_data(irq)))
3524 3525 3526 3527 3528
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");
Y
3530 3531 3532 3533
	return 0;
}
#endif

3534
#endif /* CONFIG_PCI_MSI */
3535 3536 3537 3538 3539 3540 3541
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static int
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	target_ht_irq(data->irq, dest, cfg->vector);
	return 0;
}

#endif

static struct irq_chip ht_irq_chip = {
	.name			= "PCI-HT",
	.irq_mask		= mask_ht_irq,
	.irq_unmask		= unmask_ht_irq,
	.irq_ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ht_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

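		/*
		 * Compose the low address word: destination ID, vector,
		 * physical vs. logical destination mode, edge EOI and
		 * fixed vs. arbitrated delivery, with the IRQ initially
		 * masked.
		 */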
		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* The register returns the maximum redirection entry index
	 * supported, which is one less than the total number of
	 * redirection entries.
	 */
	return reg_01.bits.entries + 1;
}

void __init probe_nr_irqs_gsi(void)
{
	int nr;

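	/* Every enumerated GSI plus headroom for the legacy IRQs */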
	nr = gsi_top + NR_IRQS_LEGACY;
	if (nr > nr_irqs_gsi)
		nr_irqs_gsi = nr;

	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}

#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

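	/* All GSIs plus a per-CPU allowance for dynamically allocated IRQs */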
	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * room for dynamically allocated MSI and HT IRQs
	 */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return NR_IRQS_LEGACY;
}
#endif

static int __io_apic_set_pci_routing(struct device *dev, int irq,
				struct io_apic_irq_attr *irq_attr)
{
	struct irq_cfg *cfg;
	int node;
	int ioapic, pin;
	int trigger, polarity;

	ioapic = irq_attr->ioapic;
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	if (dev)
		node = dev_to_node(dev);
	else
		node = cpu_to_node(0);

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return 0;

	pin = irq_attr->ioapic_pin;
	trigger = irq_attr->trigger;
	polarity = irq_attr->polarity;

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= legacy_pic->nr_legacy_irqs) {
		if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
			printk(KERN_INFO "can not add pin %d for irq %d\n",
				pin, irq);
			return 0;
		}
	}

	setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);

	return 0;
}

int io_apic_set_pci_routing(struct device *dev, int irq,
				struct io_apic_irq_attr *irq_attr)
{
	int ioapic, pin;
	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}

u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);

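	/*
	 * Collect the APIC IDs already claimed by registered IO-APICs and
	 * return the requested id if it is still free, otherwise the
	 * lowest unused one.
	 */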
	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
}

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * supported up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
#endif

int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}

int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
	int ioapic, pin, idx;

	if (skip_ioapic_setup)
		return -1;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;

	idx = find_irq_entry(ioapic, pin, mp_INT);
	if (idx < 0)
		return -1;

	*trigger = irq_trigger(idx);
	*polarity = irq_polarity(idx);
	return 0;
}

/*
 * This function is currently only a helper for the i386 SMP boot process,
 * where we need to reprogram the ioredtbls to cater for the CPUs which have
 * come online, so the mask in all cases should simply be apic->target_cpus().
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
	for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;
		irq = pin_2_irq(irq_entry, ioapic, pin);

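		/*
		 * Only the first IO-APIC, plus the legacy IRQ range on the
		 * others, is retargeted here.
		 */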
		if ((ioapic > 0) && (irq > 16))
			continue;

		desc = irq_to_desc(irq);

		/*
		 * Honour affinities which have been set in early boot
		 */
		if (desc->status &
		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
			mask = desc->irq_data.affinity;
		else
			mask = apic->target_cpus();

		if (intr_remapping_enabled)
			ir_ioapic_set_affinity(&desc->irq_data, mask, false);
		else
			ioapic_set_affinity(&desc->irq_data, mask, false);
	}

}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	mem += sizeof(struct resource) * nr_ioapics;

	for (i = 0; i < nr_ioapics; i++) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
	}

	ioapic_resources = res;

	return res;
}

void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources(nr_ioapics);
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}

void __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		if (nr_ioapics > 0)
			printk(KERN_ERR
				"IO APIC resources couldn't be allocated.\n");
		return;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}
}

int mp_find_ioapic(u32 gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_gsi_routing[i].gsi_base)
		    && (gsi <= mp_gsi_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}

int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	if (WARN_ON(ioapic == -1))
		return -1;
	if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
		return -1;

	return gsi - mp_gsi_routing[ioapic].gsi_base;
}

static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
		       " found in table, skipping!\n");
		return 1;
	}
	return 0;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;
	int entries;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].type = MP_IOAPIC;
	mp_ioapics[idx].flags = MPC_APIC_USABLE;
	mp_ioapics[idx].apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].apicid = io_apic_unique_id(id);
	mp_ioapics[idx].apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	entries = io_apic_get_redir_entries(idx);
	mp_gsi_routing[idx].gsi_base = gsi_base;
	mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	nr_ioapic_registers[idx] = entries;

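	/* Keep gsi_top one past the highest GSI seen so far */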
	if (mp_gsi_routing[idx].gsi_end >= gsi_top)
		gsi_top = mp_gsi_routing[idx].gsi_end + 1;

	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
	       mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

	nr_ioapics++;
}

/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
	struct irq_cfg *cfg;

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
#endif
	/* Make sure the irq descriptor is set up */
	cfg = alloc_irq_and_cfg_at(0, 0);

	setup_local_APIC();

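	/* Route IRQ0 through pin 0 of IO-APIC 0, edge triggered, active high */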
	add_pin_to_irq_node(cfg, 0, 0, 0);
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
}