io_apic.c 99.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
I
Ingo Molnar 已提交
4
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
L
Linus Torvalds 已提交
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
28
#include <linux/pci.h>
L
Linus Torvalds 已提交
29 30 31
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
32
#include <linux/module.h>
L
Linus Torvalds 已提交
33
#include <linux/sysdev.h>
34
#include <linux/msi.h>
35
#include <linux/htirq.h>
36
#include <linux/freezer.h>
37
#include <linux/kthread.h>
38
#include <linux/jiffies.h>	/* time_after() */
39
#include <linux/slab.h>
40 41 42 43 44
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
45
#include <linux/hpet.h>
46

47
#include <asm/idle.h>
L
Linus Torvalds 已提交
48 49
#include <asm/io.h>
#include <asm/smp.h>
50
#include <asm/cpu.h>
L
Linus Torvalds 已提交
51
#include <asm/desc.h>
52 53 54
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
L
Linus Torvalds 已提交
55
#include <asm/timer.h>
56
#include <asm/i8259.h>
57
#include <asm/nmi.h>
58
#include <asm/msidef.h>
59
#include <asm/hypertransport.h>
60
#include <asm/setup.h>
61
#include <asm/irq_remapping.h>
62
#include <asm/hpet.h>
63
#include <asm/hw_irq.h>
L
Linus Torvalds 已提交
64

I
Ingo Molnar 已提交
65
#include <asm/apic.h>
L
Linus Torvalds 已提交
66

67
#define __apicdebuginit(type) static type __init
68 69
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
70

L
Linus Torvalds 已提交
71
/*
72 73
 *      Is the SiS APIC rmw bug present ?
 *      -1 = don't know, 0 = no, 1 = yes
L
Linus Torvalds 已提交
74 75 76
 */
int sis_apic_bug = -1;

77 78
static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);
Y
Yinghai Lu 已提交
79

L
Linus Torvalds 已提交
80 81 82 83 84
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

85
/* I/O APIC entries */
86
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
87 88
int nr_ioapics;

89 90 91
/* IO APIC gsi routing info */
struct mp_ioapic_gsi  mp_gsi_routing[MAX_IO_APICS];

92 93
/* The one past the highest gsi number used */
u32 gsi_top;
94

95
/* MP IRQ source entries */
96
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
97 98 99 100

/* # of MP IRQ source entries */
int mp_irq_entries;

101 102 103
/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

104 105 106 107 108 109
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

Y
Yinghai Lu 已提交
110 111
int skip_ioapic_setup;

112 113 114 115 116 117 118 119 120
void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

121
static int __init parse_noapic(char *str)
Y
Yinghai Lu 已提交
122 123
{
	/* disable IO-APIC */
124
	arch_disable_smp_support();
Y
Yinghai Lu 已提交
125 126 127
	return 0;
}
early_param("noapic", parse_noapic);
128

129 130 131 132 133
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

T
Thomas Gleixner 已提交
134
static struct irq_pin_list *alloc_irq_pin_list(int node)
135
{
T
Thomas Gleixner 已提交
136
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
137 138
}

Y
Yinghai Lu 已提交
139
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
140
#ifdef CONFIG_SPARSE_IRQ
141
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
142
#else
143
static struct irq_cfg irq_cfgx[NR_IRQS];
144
#endif
Y
Yinghai Lu 已提交
145

146
int __init arch_early_irq_init(void)
147
{
148
	struct irq_cfg *cfg;
149
	int count, node, i;
T
Thomas Gleixner 已提交
150

151 152 153 154 155
	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

156 157
	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
158
	node = cpu_to_node(0);
159

160 161 162
	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

163
	for (i = 0; i < count; i++) {
164
		set_irq_chip_data(i, &cfg[i]);
165 166
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
167 168 169 170
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
171
		if (i < legacy_pic->nr_legacy_irqs) {
172 173 174
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
175
	}
176 177

	return 0;
178
}
179

180
#ifdef CONFIG_SPARSE_IRQ
181
struct irq_cfg *irq_cfg(unsigned int irq)
182
{
183
	return get_irq_chip_data(irq);
184
}
T
Thomas Gleixner 已提交
185

186
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
187
{
188
	struct irq_cfg *cfg;
189

190
	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
191 192 193 194 195 196
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_ATOMIC, node))
		goto out_domain;
197
	return cfg;
198 199 200 201 202
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
203 204
}

205
static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
206
{
207 208 209
	if (!cfg)
		return;
	set_irq_chip_data(at, NULL);
210 211 212 213 214
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

215
#else
216

217
struct irq_cfg *irq_cfg(unsigned int irq)
218 219
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
220
}
L
Linus Torvalds 已提交
221

222
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
223 224 225 226
{
	return irq_cfgx + irq;
}

227
static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }
228

229 230
#endif

231 232 233 234 235 236 237 238 239 240 241 242 243
static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = get_irq_chip_data(at);
		if (cfg)
			return cfg;
	}

244
	cfg = alloc_irq_cfg(at, node);
245 246 247 248 249 250 251 252 253 254 255 256 257 258
	if (cfg)
		set_irq_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
259
	free_irq_cfg(at, cfg);
260 261 262
	irq_free_desc(at);
}

L
Linus Torvalds 已提交
263 264 265 266
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
267 268
	unsigned int unused2[11];
	unsigned int eoi;
L
Linus Torvalds 已提交
269 270 271 272 273
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
274
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
L
Linus Torvalds 已提交
275 276
}

277 278 279 280 281 282
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

L
Linus Torvalds 已提交
283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
305
	struct io_apic __iomem *io_apic = io_apic_base(apic);
T
Thomas Gleixner 已提交
306 307 308

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
L
Linus Torvalds 已提交
309 310 311
	writel(value, &io_apic->data);
}

Y
Yinghai Lu 已提交
312
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
313 314 315 316
{
	struct irq_pin_list *entry;
	unsigned long flags;

317
	raw_spin_lock_irqsave(&ioapic_lock, flags);
318
	for_each_irq_pin(entry, cfg->irq_2_pin) {
319 320 321 322 323 324 325
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
326
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
327 328 329
			return true;
		}
	}
330
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
331 332 333 334

	return false;
}

335 336 337 338 339 340 341 342 343
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
344
	raw_spin_lock_irqsave(&ioapic_lock, flags);
345 346
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
347
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
348 349 350
	return eu.entry;
}

351 352 353 354 355 356
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
357 358
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
359
{
360 361
	union entry_union eu = {{0, 0}};

362
	eu.entry = e;
363 364
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
365 366
}

367
void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
368 369
{
	unsigned long flags;
370
	raw_spin_lock_irqsave(&ioapic_lock, flags);
371
	__ioapic_write_entry(apic, pin, e);
372
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
373 374 375 376 377 378 379 380 381 382 383 384
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

385
	raw_spin_lock_irqsave(&ioapic_lock, flags);
386 387
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
388
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
389 390
}

L
Linus Torvalds 已提交
391 392 393 394 395
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
396
static int
T
Thomas Gleixner 已提交
397
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
L
Linus Torvalds 已提交
398
{
399
	struct irq_pin_list **last, *entry;
400

401 402 403
	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
404
		if (entry->apic == apic && entry->pin == pin)
405
			return 0;
406
		last = &entry->next;
L
Linus Torvalds 已提交
407
	}
408

T
Thomas Gleixner 已提交
409
	entry = alloc_irq_pin_list(node);
410
	if (!entry) {
411 412 413
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
				node, apic, pin);
		return -ENOMEM;
414
	}
L
Linus Torvalds 已提交
415 416
	entry->apic = apic;
	entry->pin = pin;
417

418
	*last = entry;
419 420 421 422 423
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
T
Thomas Gleixner 已提交
424
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
425
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
L
Linus Torvalds 已提交
426 427 428 429 430
}

/*
 * Reroute an IRQ to a different pin.
 */
431
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
432 433
					   int oldapic, int oldpin,
					   int newapic, int newpin)
L
Linus Torvalds 已提交
434
{
435
	struct irq_pin_list *entry;
L
Linus Torvalds 已提交
436

437
	for_each_irq_pin(entry, cfg->irq_2_pin) {
L
Linus Torvalds 已提交
438 439 440
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
441
			/* every one is different, right? */
442
			return;
443
		}
L
Linus Torvalds 已提交
444
	}
445

446 447
	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
L
Linus Torvalds 已提交
448 449
}

450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

465 466 467
static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
468 469
{
	struct irq_pin_list *entry;
470

471 472 473 474 475 476 477 478 479 480 481 482 483 484
	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
485
}
486

487
static void io_apic_sync(struct irq_pin_list *entry)
L
Linus Torvalds 已提交
488
{
489 490 491 492 493 494
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
Y
Yinghai Lu 已提交
495
	readl(&io_apic->data);
L
Linus Torvalds 已提交
496 497
}

T
Thomas Gleixner 已提交
498
static void mask_ioapic(struct irq_cfg *cfg)
499
{
T
Thomas Gleixner 已提交
500 501 502
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
Y
Yinghai Lu 已提交
503
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
T
Thomas Gleixner 已提交
504
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
505
}
L
Linus Torvalds 已提交
506

507
static void mask_ioapic_irq(struct irq_data *data)
L
Linus Torvalds 已提交
508
{
509
	mask_ioapic(data->chip_data);
T
Thomas Gleixner 已提交
510
}
Y
Yinghai Lu 已提交
511

T
Thomas Gleixner 已提交
512 513 514
static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
L
Linus Torvalds 已提交
515 516
}

T
Thomas Gleixner 已提交
517
static void unmask_ioapic(struct irq_cfg *cfg)
L
Linus Torvalds 已提交
518 519 520
{
	unsigned long flags;

521
	raw_spin_lock_irqsave(&ioapic_lock, flags);
T
Thomas Gleixner 已提交
522
	__unmask_ioapic(cfg);
523
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
L
Linus Torvalds 已提交
524 525
}

526
static void unmask_ioapic_irq(struct irq_data *data)
Y
Yinghai Lu 已提交
527
{
528
	unmask_ioapic(data->chip_data);
Y
Yinghai Lu 已提交
529 530
}

L
Linus Torvalds 已提交
531 532 533
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;
534

L
Linus Torvalds 已提交
535
	/* Check delivery_mode to be sure we're not clearing an SMI pin */
536
	entry = ioapic_read_entry(apic, pin);
L
Linus Torvalds 已提交
537 538 539 540 541
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
542
	ioapic_mask_entry(apic, pin);
L
Linus Torvalds 已提交
543 544
}

545
static void clear_IO_APIC (void)
L
Linus Torvalds 已提交
546 547 548 549 550 551 552 553
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

554
#ifdef CONFIG_X86_32
L
Linus Torvalds 已提交
555 556 557 558 559 560
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
Y
Yinghai Lu 已提交
561 562 563
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};
L
Linus Torvalds 已提交
564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
590 591
#endif /* CONFIG_X86_32 */

592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
	int apic;
	struct IO_APIC_route_entry **ioapic_entries;

	ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
				GFP_ATOMIC);
	if (!ioapic_entries)
		return 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_ATOMIC);
		if (!ioapic_entries[apic])
			goto nomem;
	}

	return ioapic_entries;

nomem:
	while (--apic >= 0)
		kfree(ioapic_entries[apic]);
	kfree(ioapic_entries);

	return 0;
}
619 620

/*
621
 * Saves all the IO-APIC RTE's
622
 */
623
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
624 625 626
{
	int apic, pin;

627 628
	if (!ioapic_entries)
		return -ENOMEM;
629 630

	for (apic = 0; apic < nr_ioapics; apic++) {
631 632
		if (!ioapic_entries[apic])
			return -ENOMEM;
633

634
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
635
			ioapic_entries[apic][pin] =
636
				ioapic_read_entry(apic, pin);
637
	}
638

639 640 641
	return 0;
}

642 643 644 645
/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
646 647 648
{
	int apic, pin;

649 650 651
	if (!ioapic_entries)
		return;

652
	for (apic = 0; apic < nr_ioapics; apic++) {
653
		if (!ioapic_entries[apic])
654
			break;
655

656 657 658
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

659
			entry = ioapic_entries[apic][pin];
660 661 662 663 664 665 666 667
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

668 669 670 671
/*
 * Restore IO APIC entries which was saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
672 673 674
{
	int apic, pin;

675 676 677
	if (!ioapic_entries)
		return -ENOMEM;

678
	for (apic = 0; apic < nr_ioapics; apic++) {
679 680 681
		if (!ioapic_entries[apic])
			return -ENOMEM;

682 683
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
684
					ioapic_entries[apic][pin]);
685
	}
686
	return 0;
687 688
}

689 690 691 692 693 694 695 696
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic;

	for (apic = 0; apic < nr_ioapics; apic++)
		kfree(ioapic_entries[apic]);

	kfree(ioapic_entries);
697
}
L
Linus Torvalds 已提交
698 699 700 701 702 703 704 705 706

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
707 708 709 710
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
L
Linus Torvalds 已提交
711 712 713 714 715 716 717 718
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
719
static int __init find_isa_irq_pin(int irq, int type)
L
Linus Torvalds 已提交
720 721 722 723
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
724
		int lbus = mp_irqs[i].srcbus;
L
Linus Torvalds 已提交
725

A
Alexey Starikovskiy 已提交
726
		if (test_bit(lbus, mp_bus_not_pci) &&
727 728
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
L
Linus Torvalds 已提交
729

730
			return mp_irqs[i].dstirq;
L
Linus Torvalds 已提交
731 732 733 734
	}
	return -1;
}

735 736 737 738 739
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
740
		int lbus = mp_irqs[i].srcbus;
741

A
Alexey Starikovskiy 已提交
742
		if (test_bit(lbus, mp_bus_not_pci) &&
743 744
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
745 746 747 748
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
749
		for(apic = 0; apic < nr_ioapics; apic++) {
750
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
751 752 753 754 755 756 757
				return apic;
		}
	}

	return -1;
}

758
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
L
Linus Torvalds 已提交
759 760 761 762 763
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
764
	if (irq < legacy_pic->nr_legacy_irqs) {
L
Linus Torvalds 已提交
765 766 767 768 769 770 771
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
772

773
#endif
L
Linus Torvalds 已提交
774

A
Alexey Starikovskiy 已提交
775 776 777 778 779 780
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

L
Linus Torvalds 已提交
781 782 783 784 785
/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

786
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
A
Alexey Starikovskiy 已提交
787
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
L
Linus Torvalds 已提交
788 789 790 791 792 793 794 795 796 797 798

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
A
Alexey Starikovskiy 已提交
799
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
L
Linus Torvalds 已提交
800

801
static int MPBIOS_polarity(int idx)
L
Linus Torvalds 已提交
802
{
803
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
804 805 806 807 808
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
809
	switch (mp_irqs[idx].irqflag & 3)
810
	{
811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
L
Linus Torvalds 已提交
839 840 841 842 843 844
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
845
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
846 847 848 849 850
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
851
	switch ((mp_irqs[idx].irqflag>>2) & 3)
L
Linus Torvalds 已提交
852
	{
853 854 855 856 857
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
858
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
L
Linus Torvalds 已提交
888
			break;
889
		case 1: /* edge */
L
Linus Torvalds 已提交
890
		{
891
			trigger = 0;
L
Linus Torvalds 已提交
892 893
			break;
		}
894
		case 2: /* reserved */
L
Linus Torvalds 已提交
895
		{
896 897
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
L
Linus Torvalds 已提交
898 899
			break;
		}
900
		case 3: /* level */
L
Linus Torvalds 已提交
901
		{
902
			trigger = 1;
L
Linus Torvalds 已提交
903 904
			break;
		}
905
		default: /* invalid */
L
Linus Torvalds 已提交
906 907
		{
			printk(KERN_WARNING "broken BIOS!!\n");
908
			trigger = 0;
L
Linus Torvalds 已提交
909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
927
	int irq;
928
	int bus = mp_irqs[idx].srcbus;
L
Linus Torvalds 已提交
929 930 931 932

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
933
	if (mp_irqs[idx].dstirq != pin)
L
Linus Torvalds 已提交
934 935
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

936
	if (test_bit(bus, mp_bus_not_pci)) {
937
		irq = mp_irqs[idx].srcbusirq;
938
	} else {
939
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;
940 941 942 943

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
944
			irq = gsi_top + gsi;
L
Linus Torvalds 已提交
945 946
	}

947
#ifdef CONFIG_X86_32
L
Linus Torvalds 已提交
948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
964 965
#endif

L
Linus Torvalds 已提交
966 967 968
	return irq;
}

969 970 971 972 973
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
974
				struct io_apic_irq_attr *irq_attr)
975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
1004 1005 1006 1007
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
1008 1009 1010 1011 1012 1013 1014
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
1015 1016 1017 1018
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
1019 1020 1021 1022 1023 1024 1025 1026
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

1027 1028 1029 1030 1031
void lock_vector_lock(void)
{
	/* Used to the online set of cpus does not change
	 * during assign_irq_vector.
	 */
1032
	raw_spin_lock(&vector_lock);
1033
}
L
Linus Torvalds 已提交
1034

1035
void unlock_vector_lock(void)
L
Linus Torvalds 已提交
1036
{
1037
	raw_spin_unlock(&vector_lock);
1038
}
L
Linus Torvalds 已提交
1039

1040 1041
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1042
{
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
1054
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1055
	static int current_offset = VECTOR_OFFSET_START % 8;
1056
	unsigned int old_vector;
1057 1058
	int cpu, err;
	cpumask_var_t tmp_mask;
1059

1060
	if (cfg->move_in_progress)
1061
		return -EBUSY;
1062

1063 1064
	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;
1065

1066 1067
	old_vector = cfg->vector;
	if (old_vector) {
1068 1069 1070 1071
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
1072
			return 0;
1073
		}
1074
	}
1075

1076
	/* Only try and allocate irqs on cpus that are present */
1077 1078
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
1079 1080
		int new_cpu;
		int vector, offset;
1081

1082
		apic->vector_allocation_domain(cpu, tmp_mask);
1083

1084 1085
		vector = current_vector;
		offset = current_offset;
1086
next:
1087 1088
		vector += 8;
		if (vector >= first_system_vector) {
1089
			/* If out of vectors on large boxen, must share them. */
1090
			offset = (offset + 1) % 8;
1091
			vector = FIRST_EXTERNAL_VECTOR + offset;
1092 1093 1094
		}
		if (unlikely(current_vector == vector))
			continue;
1095 1096

		if (test_bit(vector, used_vectors))
1097
			goto next;
1098

1099
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1100 1101 1102 1103 1104 1105 1106
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
1107
			cpumask_copy(cfg->old_domain, cfg->domain);
1108
		}
1109
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1110 1111
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
1112 1113 1114
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
1115
	}
1116 1117
	free_cpumask_var(tmp_mask);
	return err;
1118 1119
}

1120
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1121 1122
{
	int err;
1123 1124
	unsigned long flags;

1125
	raw_spin_lock_irqsave(&vector_lock, flags);
Y
Yinghai Lu 已提交
1126
	err = __assign_irq_vector(irq, cfg, mask);
1127
	raw_spin_unlock_irqrestore(&vector_lock, flags);
1128 1129 1130
	return err;
}

Y
Yinghai Lu 已提交
1131
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1132 1133 1134 1135 1136 1137
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
1138
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1139 1140 1141
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
1142
	cpumask_clear(cfg->domain);
1143 1144 1145

	if (likely(!cfg->move_in_progress))
		return;
1146
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1147 1148 1149 1150 1151 1152 1153 1154 1155
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
1156 1157 1158 1159 1160 1161 1162
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;
1163
	struct irq_desc *desc;
1164

1165 1166 1167 1168 1169
	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
1170
	raw_spin_lock(&vector_lock);
1171
	/* Mark the inuse vectors */
1172
	for_each_irq_desc(irq, desc) {
T
Thomas Gleixner 已提交
1173
		cfg = get_irq_desc_chip_data(desc);
1174 1175 1176 1177 1178 1179 1180 1181

		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

1182
		if (!cpumask_test_cpu(cpu, cfg->domain))
1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
1194
		if (!cpumask_test_cpu(cpu, cfg->domain))
1195
			per_cpu(vector_irq, cpu)[vector] = -1;
1196
	}
1197
	raw_spin_unlock(&vector_lock);
L
Linus Torvalds 已提交
1198
}
1199

1200
static struct irq_chip ioapic_chip;
1201
static struct irq_chip ir_ioapic_chip;
L
Linus Torvalds 已提交
1202

1203 1204 1205
#define IOAPIC_AUTO     -1
#define IOAPIC_EDGE     0
#define IOAPIC_LEVEL    1
L
Linus Torvalds 已提交
1206

1207
#ifdef CONFIG_X86_32
1208 1209
static inline int IO_APIC_irq_trigger(int irq)
{
T
Thomas Gleixner 已提交
1210
	int apic, idx, pin;
1211

T
Thomas Gleixner 已提交
1212 1213 1214 1215 1216 1217 1218 1219
	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
1220 1221
         * nonexistent IRQs are edge default
         */
T
Thomas Gleixner 已提交
1222
	return 0;
1223
}
1224 1225 1226
#else
static inline int IO_APIC_irq_trigger(int irq)
{
1227
	return 1;
1228 1229
}
#endif
1230

1231
static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
L
Linus Torvalds 已提交
1232
{
Y
Yinghai Lu 已提交
1233

1234
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1235
	    trigger == IOAPIC_LEVEL)
1236
		irq_set_status_flags(irq, IRQ_LEVEL);
1237
	else
1238
		irq_clear_status_flags(irq, IRQ_LEVEL);
1239

1240
	if (irq_remapped(get_irq_chip_data(irq))) {
1241
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
1242 1243 1244 1245 1246 1247 1248 1249 1250
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						     "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
1251

1252 1253
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
1254
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
1255 1256
					      handle_fasteoi_irq,
					      "fasteoi");
1257
	else
1258
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
1259
					      handle_edge_irq, "edge");
L
Linus Torvalds 已提交
1260 1261
}

1262 1263 1264
int setup_ioapic_entry(int apic_id, int irq,
		       struct IO_APIC_route_entry *entry,
		       unsigned int destination, int trigger,
1265
		       int polarity, int vector, int pin)
L
Linus Torvalds 已提交
1266
{
1267 1268 1269 1270 1271
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry,0,sizeof(*entry));

1272
	if (intr_remapping_enabled) {
I
Ingo Molnar 已提交
1273
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1274 1275 1276 1277 1278 1279
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
I
Ingo Molnar 已提交
1280
			panic("No mapping iommu for ioapic %d\n", apic_id);
1281 1282 1283

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
I
Ingo Molnar 已提交
1284
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1285

1286
		prepare_irte(&irte, vector, destination);
1287

1288 1289 1290
		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

1291 1292 1293 1294 1295 1296
		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
1297 1298 1299 1300 1301
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
1302
	} else {
1303 1304
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
1305
		entry->dest = destination;
1306
		entry->vector = vector;
1307
	}
1308

1309
	entry->mask = 0;				/* enable IRQ */
1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

1321 1322
static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
			     struct irq_cfg *cfg, int trigger, int polarity)
1323
{
L
Linus Torvalds 已提交
1324
	struct IO_APIC_route_entry entry;
1325
	unsigned int dest;
1326 1327 1328

	if (!IO_APIC_IRQ(irq))
		return;
1329 1330 1331 1332 1333
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
1334
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1335 1336
		apic->vector_allocation_domain(0, cfg->domain);

1337
	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1338 1339
		return;

1340
	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1341 1342 1343 1344

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
I
Ingo Molnar 已提交
1345
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1346 1347 1348
		    irq, trigger, polarity);


I
Ingo Molnar 已提交
1349
	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1350
			       dest, trigger, polarity, cfg->vector, pin)) {
1351
		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
I
Ingo Molnar 已提交
1352
		       mp_ioapics[apic_id].apicid, pin);
Y
Yinghai Lu 已提交
1353
		__clear_irq_vector(irq, cfg);
1354 1355 1356
		return;
	}

1357
	ioapic_register_intr(irq, trigger);
1358
	if (irq < legacy_pic->nr_legacy_irqs)
1359
		legacy_pic->mask(irq);
1360

I
Ingo Molnar 已提交
1361
	ioapic_write_entry(apic_id, pin, entry);
1362 1363
}

1364 1365 1366 1367
static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

1368 1369
static void __init setup_IO_APIC_irqs(void)
{
1370
	int apic_id, pin, idx, irq, notcon = 0;
1371
	int node = cpu_to_node(0);
1372
	struct irq_cfg *cfg;
L
Linus Torvalds 已提交
1373 1374 1375

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

E
Eric W. Biederman 已提交
1376
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}
1395

1396
		irq = pin_2_irq(idx, apic_id, pin);
1397

E
Eric W. Biederman 已提交
1398 1399 1400
		if ((apic_id > 0) && (irq > 16))
			continue;

1401 1402 1403 1404 1405 1406 1407
		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
				apic->multi_timer_check(apic_id, irq))
			continue;
1408

1409 1410
		cfg = alloc_irq_and_cfg_at(irq, node);
		if (!cfg)
1411
			continue;
1412

1413
		add_pin_to_irq_node(cfg, node, apic_id, pin);
1414 1415 1416 1417
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
1418 1419
		setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
				  irq_polarity(idx));
L
Linus Torvalds 已提交
1420 1421
	}

1422 1423
	if (notcon)
		apic_printk(APIC_VERBOSE,
1424
			" (apicid-pin) not connected\n");
L
Linus Torvalds 已提交
1425 1426
}

Y
Yinghai Lu 已提交
1427 1428 1429 1430 1431 1432 1433
/*
 * for the gsit that is not in first ioapic
 * but could not use acpi_register_gsi()
 * like some special sci in IBM x3330
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
1434
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
Y
Yinghai Lu 已提交
1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449
	struct irq_cfg *cfg;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);
1450 1451 1452

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
Y
Yinghai Lu 已提交
1453
		return;
1454

1455 1456
	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
Y
Yinghai Lu 已提交
1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467
		return;

	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

1468
	setup_ioapic_irq(apic_id, pin, irq, cfg,
Y
Yinghai Lu 已提交
1469 1470 1471
			irq_trigger(idx), irq_polarity(idx));
}

L
Linus Torvalds 已提交
1472
/*
1473
 * Set up the timer pin, possibly with the 8259A-master behind.
L
Linus Torvalds 已提交
1474
 */
I
Ingo Molnar 已提交
1475
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1476
					int vector)
L
Linus Torvalds 已提交
1477 1478 1479
{
	struct IO_APIC_route_entry entry;

1480 1481 1482
	if (intr_remapping_enabled)
		return;

1483
	memset(&entry, 0, sizeof(entry));
L
Linus Torvalds 已提交
1484 1485 1486 1487 1488

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
1489
	entry.dest_mode = apic->irq_dest_mode;
Y
Yinghai Lu 已提交
1490
	entry.mask = 0;			/* don't mask IRQ for edge */
1491
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1492
	entry.delivery_mode = apic->irq_delivery_mode;
L
Linus Torvalds 已提交
1493 1494 1495 1496 1497 1498
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
1499
	 * scene we may have a 8259A-master in AEOI mode ...
L
Linus Torvalds 已提交
1500
	 */
1501
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
L
Linus Torvalds 已提交
1502 1503 1504 1505

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
I
Ingo Molnar 已提交
1506
	ioapic_write_entry(apic_id, pin, entry);
L
Linus Torvalds 已提交
1507 1508
}

1509 1510

__apicdebuginit(void) print_IO_APIC(void)
L
Linus Torvalds 已提交
1511 1512 1513 1514 1515 1516 1517
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
1518
	struct irq_cfg *cfg;
1519
	struct irq_desc *desc;
1520
	unsigned int irq;
L
Linus Torvalds 已提交
1521

1522
	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
L
Linus Torvalds 已提交
1523 1524
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1525
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);
L
Linus Torvalds 已提交
1526 1527 1528 1529 1530 1531 1532 1533 1534

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

1535
	raw_spin_lock_irqsave(&ioapic_lock, flags);
L
Linus Torvalds 已提交
1536 1537 1538 1539
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
T
Thomas Gleixner 已提交
1540 1541
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
1542
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
L
Linus Torvalds 已提交
1543

1544
	printk("\n");
1545
	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
L
Linus Torvalds 已提交
1546 1547 1548 1549 1550
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

1551
	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
L
Linus Torvalds 已提交
1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

1580
	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1581
			  " Stat Dmod Deli Vect:\n");
L
Linus Torvalds 已提交
1582 1583 1584 1585

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

1586
		entry = ioapic_read_entry(apic, i);
L
Linus Torvalds 已提交
1587

1588 1589 1590 1591
		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);
L
Linus Torvalds 已提交
1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
1606 1607 1608
	for_each_irq_desc(irq, desc) {
		struct irq_pin_list *entry;

T
Thomas Gleixner 已提交
1609
		cfg = get_irq_desc_chip_data(desc);
1610 1611
		if (!cfg)
			continue;
1612
		entry = cfg->irq_2_pin;
1613
		if (!entry)
L
Linus Torvalds 已提交
1614
			continue;
1615
		printk(KERN_DEBUG "IRQ%d ", irq);
1616
		for_each_irq_pin(entry, cfg->irq_2_pin)
L
Linus Torvalds 已提交
1617 1618 1619 1620 1621 1622 1623 1624 1625
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

1626
__apicdebuginit(void) print_APIC_field(int base)
L
Linus Torvalds 已提交
1627
{
1628
	int i;
L
Linus Torvalds 已提交
1629

1630 1631 1632 1633 1634 1635
	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
L
Linus Torvalds 已提交
1636 1637
}

1638
__apicdebuginit(void) print_local_APIC(void *dummy)
L
Linus Torvalds 已提交
1639
{
1640
	unsigned int i, v, ver, maxlvt;
1641
	u64 icr;
L
Linus Torvalds 已提交
1642

1643
	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
L
Linus Torvalds 已提交
1644
		smp_processor_id(), hard_smp_processor_id());
1645
	v = apic_read(APIC_ID);
1646
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
L
Linus Torvalds 已提交
1647 1648 1649
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
1650
	maxlvt = lapic_get_maxlvt();
L
Linus Torvalds 已提交
1651 1652 1653 1654

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

1655
	if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
1656 1657 1658 1659 1660
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
L
Linus Torvalds 已提交
1661 1662 1663 1664
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

1665 1666 1667 1668 1669 1670 1671 1672 1673
	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

L
Linus Torvalds 已提交
1674 1675
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1676 1677 1678 1679
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
L
Linus Torvalds 已提交
1680 1681 1682 1683
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
1684
	print_APIC_field(APIC_ISR);
L
Linus Torvalds 已提交
1685
	printk(KERN_DEBUG "... APIC TMR field:\n");
1686
	print_APIC_field(APIC_TMR);
L
Linus Torvalds 已提交
1687
	printk(KERN_DEBUG "... APIC IRR field:\n");
1688
	print_APIC_field(APIC_IRR);
L
Linus Torvalds 已提交
1689

1690 1691
	if (APIC_INTEGRATED(ver)) {             /* !82489DX */
		if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
L
Linus Torvalds 已提交
1692
			apic_write(APIC_ESR, 0);
1693

L
Linus Torvalds 已提交
1694 1695 1696 1697
		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

1698
	icr = apic_icr_read();
1699 1700
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
L
Linus Torvalds 已提交
1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {                       /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
L
Linus Torvalds 已提交
1737 1738 1739
	printk("\n");
}

1740
__apicdebuginit(void) print_local_APICs(int maxcpu)
L
Linus Torvalds 已提交
1741
{
1742 1743
	int cpu;

1744 1745 1746
	if (!maxcpu)
		return;

1747
	preempt_disable();
1748 1749 1750
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
1751
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1752
	}
1753
	preempt_enable();
L
Linus Torvalds 已提交
1754 1755
}

1756
__apicdebuginit(void) print_PIC(void)
L
Linus Torvalds 已提交
1757 1758 1759 1760
{
	unsigned int v;
	unsigned long flags;

1761
	if (!legacy_pic->nr_legacy_irqs)
L
Linus Torvalds 已提交
1762 1763 1764 1765
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

1766
	raw_spin_lock_irqsave(&i8259A_lock, flags);
L
Linus Torvalds 已提交
1767 1768 1769 1770 1771 1772 1773

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

1774 1775
	outb(0x0b,0xa0);
	outb(0x0b,0x20);
L
Linus Torvalds 已提交
1776
	v = inb(0xa0) << 8 | inb(0x20);
1777 1778
	outb(0x0a,0xa0);
	outb(0x0a,0x20);
L
Linus Torvalds 已提交
1779

1780
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
L
Linus Torvalds 已提交
1781 1782 1783 1784 1785 1786 1787

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805
static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
1806
{
1807 1808 1809
	if (apic_verbosity == APIC_QUIET)
		return 0;

1810
	print_PIC();
1811 1812

	/* don't print out if apic is not there */
1813
	if (!cpu_has_apic && !apic_from_smp_config())
1814 1815
		return 0;

1816
	print_local_APICs(show_lapic);
1817 1818 1819 1820 1821
	print_IO_APIC();

	return 0;
}

1822
fs_initcall(print_ICs);
1823

L
Linus Torvalds 已提交
1824

Y
Yinghai Lu 已提交
1825 1826 1827
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

1828
void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * we have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see whether the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little more complex (we need to configure
	 * both the IOAPIC RTE as well as the interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}


		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
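
/*
 * Booting with "no_timer_check" makes timer_irq_works() below succeed
 * unconditionally, i.e. the IO-APIC timer interrupt is assumed to work
 * without being probed.
 */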

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
2115

2116
static unsigned int startup_ioapic_irq(struct irq_data *data)
L
Linus Torvalds 已提交
2117
{
2118
	int was_pending = 0, irq = data->irq;
L
Linus Torvalds 已提交
2119 2120
	unsigned long flags;

2121
	raw_spin_lock_irqsave(&ioapic_lock, flags);
2122
	if (irq < legacy_pic->nr_legacy_irqs) {
2123
		legacy_pic->mask(irq);
2124
		if (legacy_pic->irq_pending(irq))
L
Linus Torvalds 已提交
2125 2126
			was_pending = 1;
	}
2127
	__unmask_ioapic(data->chip_data);
2128
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
L
Linus Torvalds 已提交
2129 2130 2131 2132

	return was_pending;
}

2133
static int ioapic_retrigger_irq(struct irq_data *data)
L
Linus Torvalds 已提交
2134
{
2135
	struct irq_cfg *cfg = data->chip_data;
2136 2137
	unsigned long flags;

2138
	raw_spin_lock_irqsave(&vector_lock, flags);
2139
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2140
	raw_spin_unlock_irqrestore(&vector_lock, flags);
2141 2142 2143

	return 1;
}
2144

2145 2146 2147 2148 2149 2150 2151 2152
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */
2153

2154
#ifdef CONFIG_SMP
2155
void send_cleanup_vector(struct irq_cfg *cfg)
2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

2171
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2172 2173 2174 2175 2176
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

2177
	for_each_irq_pin(entry, cfg->irq_2_pin) {
2178 2179 2180 2181 2182 2183 2184 2185
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
2186
		if (!irq_remapped(cfg))
2187 2188 2189 2190 2191 2192 2193 2194 2195
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}

/*
2196
 * Either sets data->affinity to a valid value, and returns
2197
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
2198
 * leaves data->affinity untouched.
2199
 */
2200 2201
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
2202
{
2203
	struct irq_cfg *cfg = data->chip_data;
2204 2205

	if (!cpumask_intersects(mask, cpu_online_mask))
2206
		return -1;
2207

2208
	if (assign_irq_vector(data->irq, data->chip_data, mask))
2209
		return -1;
2210

2211
	cpumask_copy(data->affinity, mask);
2212

2213
	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
2214
	return 0;
2215 2216
}

2217
static int
2218 2219
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
2220
{
2221
	unsigned int dest, irq = data->irq;
2222
	unsigned long flags;
2223
	int ret;
2224

2225
	raw_spin_lock_irqsave(&ioapic_lock, flags);
2226
	ret = __ioapic_set_affinity(data, mask, &dest);
2227
	if (!ret) {
2228 2229
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
2230
		__target_IO_APIC_irq(irq, dest, data->chip_data);
2231
	}
2232
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2233
	return ret;
2234 2235
}

2236
#ifdef CONFIG_INTR_REMAP
2237

2238 2239 2240
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
2241 2242
 * For both level and edge triggered, irq migration is a simple atomic
 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
2243
 *
2244 2245 2246 2247
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
2248
 */
2249
static int
2250 2251
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
2252
{
2253 2254
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
2255
	struct irte irte;
2256

2257
	if (!cpumask_intersects(mask, cpu_online_mask))
2258
		return -EINVAL;
2259

2260
	if (get_irte(irq, &irte))
2261
		return -EBUSY;
2262

Y
Yinghai Lu 已提交
2263
	if (assign_irq_vector(irq, cfg, mask))
2264
		return -EBUSY;
2265

2266
	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2267 2268 2269 2270 2271 2272 2273 2274 2275

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

2276 2277
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
2278

2279
	cpumask_copy(data->affinity, mask);
2280
	return 0;
2281 2282
}

2283
#else
2284 2285 2286
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
2287
{
2288
	return 0;
2289
}
2290 2291 2292 2293 2294
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
2295

2296 2297 2298 2299 2300 2301 2302
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
2303
		unsigned int irr;
2304 2305 2306 2307
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

2308 2309 2310
		if (irq == -1)
			continue;

2311 2312 2313 2314 2315
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
2316
		raw_spin_lock(&desc->lock);
2317

2318 2319 2320 2321 2322 2323 2324
		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

2325
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2326 2327
			goto unlock;

2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr  & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
2340 2341
		__get_cpu_var(vector_irq)[vector] = -1;
unlock:
2342
		raw_spin_unlock(&desc->lock);
2343 2344 2345 2346 2347
	}

	irq_exit();
}

T
Thomas Gleixner 已提交
2348
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2349
{
2350
	unsigned me;
2351

2352
	if (likely(!cfg->move_in_progress))
2353 2354 2355
		return;

	me = smp_processor_id();
2356

2357
	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2358
		send_cleanup_vector(cfg);
2359
}
2360

T
Thomas Gleixner 已提交
2361
static void irq_complete_move(struct irq_cfg *cfg)
2362
{
T
Thomas Gleixner 已提交
2363
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
2364 2365 2366 2367
}

void irq_force_complete_move(int irq)
{
T
Thomas Gleixner 已提交
2368
	struct irq_cfg *cfg = get_irq_chip_data(irq);
2369

2370 2371 2372
	if (!cfg)
		return;

T
Thomas Gleixner 已提交
2373
	__irq_complete_move(cfg, cfg->vector);
2374
}
2375
#else
T
Thomas Gleixner 已提交
2376
static inline void irq_complete_move(struct irq_cfg *cfg) { }
2377
#endif
Y
Yinghai Lu 已提交
2378

2379
static void ack_apic_edge(struct irq_data *data)
2380
{
2381 2382
	irq_complete_move(data->chip_data);
	move_native_irq(data->irq);
2383 2384 2385
	ack_APIC_irq();
}

Y
Yinghai Lu 已提交
2386 2387
atomic_t irq_mis_count;

2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403
/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is a documentation error; these ICH chips
 * actually use io-apics of version 0x20.
 *
 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during this.
*/
T
Thomas Gleixner 已提交
2404
static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2405 2406
{
	struct irq_pin_list *entry;
T
Thomas Gleixner 已提交
2407
	unsigned long flags;
2408

T
Thomas Gleixner 已提交
2409
	raw_spin_lock_irqsave(&ioapic_lock, flags);
2410
	for_each_irq_pin(entry, cfg->irq_2_pin) {
2411 2412 2413 2414 2415 2416 2417
		if (mp_ioapics[entry->apic].apicver >= 0x20) {
			/*
			 * Intr-remapping uses pin number as the virtual vector
			 * in the RTE. Actual vector is programmed in
			 * intr-remapping table entry. Hence for the io-apic
			 * EOI we use the pin number.
			 */
2418
			if (irq_remapped(cfg))
2419 2420 2421 2422 2423 2424 2425
				io_apic_eoi(entry->apic, entry->pin);
			else
				io_apic_eoi(entry->apic, cfg->vector);
		} else {
			__mask_and_edge_IO_APIC_irq(entry);
			__unmask_and_level_IO_APIC_irq(entry);
		}
2426
	}
2427
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2428 2429
}

2430
static void ack_apic_level(struct irq_data *data)
2431
{
2432 2433
	struct irq_cfg *cfg = data->chip_data;
	int i, do_unmask_irq = 0, irq = data->irq;
Y
Yinghai Lu 已提交
2434
	struct irq_desc *desc = irq_to_desc(irq);
Y
Yinghai Lu 已提交
2435
	unsigned long v;
2436

T
Thomas Gleixner 已提交
2437
	irq_complete_move(cfg);
2438
#ifdef CONFIG_GENERIC_PENDING_IRQ
2439
	/* If we are moving the irq we need to mask it */
Y
Yinghai Lu 已提交
2440
	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2441
		do_unmask_irq = 1;
T
Thomas Gleixner 已提交
2442
		mask_ioapic(cfg);
2443
	}
2444 2445
#endif

Y
Yinghai Lu 已提交
2446
	/*
2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source.  The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476
	 *
	 * Also in the case when cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case also
	 * level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcasted to the IO-APIC's which will clear the remoteIRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APIC's
	 * supporting EOI register, we do an explicit EOI to clear the
	 * remote IRR and on IO-APIC's which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
2477
	 */
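	/*
	 * Sample the TMR bit for this vector: if the local APIC latched this
	 * level-triggered interrupt as an edge, the remote IRR has to be
	 * cleared explicitly further down (see eoi_ioapic_irq()).
	 */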
Y
Yinghai Lu 已提交
2478
	i = cfg->vector;
Y
Yinghai Lu 已提交
2479 2480
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

2481 2482 2483 2484 2485 2486
	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

2487 2488 2489 2490 2491 2492 2493
	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
2494 2495 2496
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

T
Thomas Gleixner 已提交
2497
		eoi_ioapic_irq(irq, cfg);
2498 2499
	}

2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527
	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
Y
Yinghai Lu 已提交
2528
		if (!io_apic_level_ack_pending(cfg))
2529
			move_masked_irq(irq);
T
Thomas Gleixner 已提交
2530
		unmask_ioapic(cfg);
2531
	}
Y
Yinghai Lu 已提交
2532
}
2533

2534
#ifdef CONFIG_INTR_REMAP
2535
static void ir_ack_apic_edge(struct irq_data *data)
2536
{
2537
	ack_APIC_irq();
2538 2539
}

2540
static void ir_ack_apic_level(struct irq_data *data)
2541
{
2542
	ack_APIC_irq();
2543
	eoi_ioapic_irq(data->irq, data->chip_data);
2544 2545 2546
}
#endif /* CONFIG_INTR_REMAP */

2547
static struct irq_chip ioapic_chip __read_mostly = {
2548 2549 2550 2551 2552 2553
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
2554
#ifdef CONFIG_SMP
2555
	.irq_set_affinity	= ioapic_set_affinity,
2556
#endif
2557
	.irq_retrigger		= ioapic_retrigger_irq,
L
Linus Torvalds 已提交
2558 2559
};

2560
static struct irq_chip ir_ioapic_chip __read_mostly = {
2561 2562 2563 2564
	.name			= "IR-IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
2565
#ifdef CONFIG_INTR_REMAP
2566 2567
	.irq_ack		= ir_ack_apic_edge,
	.irq_eoi		= ir_ack_apic_level,
2568
#ifdef CONFIG_SMP
2569
	.irq_set_affinity	= ir_ioapic_set_affinity,
2570
#endif
2571
#endif
2572
	.irq_retrigger		= ioapic_retrigger_irq,
2573
};
L
Linus Torvalds 已提交
2574 2575 2576 2577

static inline void init_IO_APIC_traps(void)
{
	int irq;
2578
	struct irq_desc *desc;
2579
	struct irq_cfg *cfg;
L
Linus Torvalds 已提交
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
2592
	for_each_irq_desc(irq, desc) {
T
Thomas Gleixner 已提交
2593
		cfg = get_irq_desc_chip_data(desc);
2594
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
L
Linus Torvalds 已提交
2595 2596 2597 2598 2599
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
2600 2601
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
2602
			else
L
Linus Torvalds 已提交
2603
				/* Strange. Oh, well.. */
2604
				desc->chip = &no_irq_chip;
L
Linus Torvalds 已提交
2605 2606 2607 2608
		}
	}
}

2609 2610 2611
/*
 * The local APIC irq-chip implementation:
 */
L
Linus Torvalds 已提交
2612

2613
static void mask_lapic_irq(struct irq_data *data)
L
Linus Torvalds 已提交
2614 2615 2616 2617
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
2618
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
L
Linus Torvalds 已提交
2619 2620
}

2621
static void unmask_lapic_irq(struct irq_data *data)
L
Linus Torvalds 已提交
2622
{
2623
	unsigned long v;
L
Linus Torvalds 已提交
2624

2625
	v = apic_read(APIC_LVT0);
2626
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2627
}
L
Linus Torvalds 已提交
2628

2629
static void ack_lapic_irq(struct irq_data *data)
2630 2631 2632 2633
{
	ack_APIC_irq();
}

2634
static struct irq_chip lapic_chip __read_mostly = {
2635
	.name		= "local-APIC",
2636 2637 2638
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
L
Linus Torvalds 已提交
2639 2640
};

2641
static void lapic_register_intr(int irq)
2642
{
2643
	irq_clear_status_flags(irq, IRQ_LEVEL);
2644 2645 2646 2647
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

2648
static void __init setup_nmi(void)
L
Linus Torvalds 已提交
2649 2650
{
	/*
2651
	 * Dirty trick to enable the NMI watchdog ...
L
Linus Torvalds 已提交
2652 2653 2654 2655 2656 2657
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
2658
	 */
L
Linus Torvalds 已提交
2659 2660
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

2661
	enable_NMI_through_LVT0();
L
Linus Torvalds 已提交
2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672

	apic_printk(APIC_VERBOSE, " done.\n");
}

/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
2673
static inline void __init unlock_ExtINT_logic(void)
L
Linus Torvalds 已提交
2674
{
2675
	int apic, pin, i;
L
Linus Torvalds 已提交
2676 2677 2678
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

2679
	pin  = find_isa_irq_pin(8, mp_INT);
2680 2681 2682 2683
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
2684
	apic = find_isa_irq_apic(8, mp_INT);
2685 2686
	if (apic == -1) {
		WARN_ON_ONCE(1);
L
Linus Torvalds 已提交
2687
		return;
2688
	}
L
Linus Torvalds 已提交
2689

2690
	entry0 = ioapic_read_entry(apic, pin);
2691
	clear_IO_APIC_pin(apic, pin);
L
Linus Torvalds 已提交
2692 2693 2694 2695 2696

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
2697
	entry1.dest = hard_smp_processor_id();
L
Linus Torvalds 已提交
2698 2699 2700 2701 2702
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

2703
	ioapic_write_entry(apic, pin, entry1);
L
Linus Torvalds 已提交
2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2720
	clear_IO_APIC_pin(apic, pin);
L
Linus Torvalds 已提交
2721

2722
	ioapic_write_entry(apic, pin, entry0);
L
Linus Torvalds 已提交
2723 2724
}

Y
Yinghai Lu 已提交
2725
static int disable_timer_pin_1 __initdata;
2726
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2727
static int __init disable_timer_pin_setup(char *arg)
Y
Yinghai Lu 已提交
2728 2729 2730 2731
{
	disable_timer_pin_1 = 1;
	return 0;
}
2732
early_param("disable_timer_pin_1", disable_timer_pin_setup);
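/*
 * When "disable_timer_pin_1" is given on the command line, check_timer()
 * clears the IO-APIC timer pin again once the timer IRQ has been verified
 * to work.
 */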
Y
Yinghai Lu 已提交
2733 2734 2735

int timer_through_8259 __initdata;

L
Linus Torvalds 已提交
2736 2737 2738 2739 2740
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
2741 2742
 *
 * FIXME: really need to revamp this for all platforms.
L
Linus Torvalds 已提交
2743
 */
2744
static inline void __init check_timer(void)
L
Linus Torvalds 已提交
2745
{
2746
	struct irq_cfg *cfg = get_irq_chip_data(0);
2747
	int node = cpu_to_node(0);
2748
	int apic1, pin1, apic2, pin2;
2749
	unsigned long flags;
2750
	int no_pin1 = 0;
2751 2752

	local_irq_save(flags);
2753

L
Linus Torvalds 已提交
2754 2755 2756
	/*
	 * get/set the timer IRQ vector:
	 */
2757
	legacy_pic->mask(0);
2758
	assign_irq_vector(0, cfg, apic->target_cpus());
L
Linus Torvalds 已提交
2759 2760

	/*
2761 2762 2763 2764 2765 2766 2767
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
L
Linus Torvalds 已提交
2768
	 */
2769
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2770
	legacy_pic->init(1);
2771
#ifdef CONFIG_X86_32
Y
Yinghai Lu 已提交
2772 2773 2774 2775 2776 2777 2778
	{
		unsigned int ver;

		ver = apic_read(APIC_LVR);
		ver = GET_APIC_VERSION(ver);
		timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
	}
2779
#endif
L
Linus Torvalds 已提交
2780

2781 2782 2783 2784
	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;
L
Linus Torvalds 已提交
2785

2786 2787
	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2788
		    cfg->vector, apic1, pin1, apic2, pin2);
L
Linus Torvalds 已提交
2789

2790 2791 2792 2793 2794 2795 2796 2797
	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
2798 2799
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
2800 2801 2802 2803 2804 2805 2806 2807
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

L
Linus Torvalds 已提交
2808 2809 2810 2811
	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
2812
		if (no_pin1) {
2813
			add_pin_to_irq_node(cfg, node, apic1, pin1);
2814
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
Y
Yinghai Lu 已提交
2815
		} else {
2816
			/* For an edge-triggered pin, setup_ioapic_irq() already
			 * leaves it unmasked, so we only need to unmask here
			 * if the pin is level-triggered (do we really have a
			 * level-triggered timer?).
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
T
Thomas Gleixner 已提交
2824
				unmask_ioapic(cfg);
2825
		}
L
Linus Torvalds 已提交
2826 2827 2828
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
2829
				legacy_pic->unmask(0);
L
Linus Torvalds 已提交
2830
			}
2831 2832
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
2833
			goto out;
L
Linus Torvalds 已提交
2834
		}
2835 2836
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
Y
Yinghai Lu 已提交
2837
		local_irq_disable();
2838
		clear_IO_APIC_pin(apic1, pin1);
2839
		if (!no_pin1)
2840 2841
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");
L
Linus Torvalds 已提交
2842

2843 2844 2845 2846
		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
L
Linus Torvalds 已提交
2847 2848 2849
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
2850
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
2851
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2852
		legacy_pic->unmask(0);
L
Linus Torvalds 已提交
2853
		if (timer_irq_works()) {
2854
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2855
			timer_through_8259 = 1;
L
Linus Torvalds 已提交
2856
			if (nmi_watchdog == NMI_IO_APIC) {
2857
				legacy_pic->mask(0);
L
Linus Torvalds 已提交
2858
				setup_nmi();
2859
				legacy_pic->unmask(0);
L
Linus Torvalds 已提交
2860
			}
2861
			goto out;
L
Linus Torvalds 已提交
2862 2863 2864 2865
		}
		/*
		 * Cleanup, just in case ...
		 */
Y
Yinghai Lu 已提交
2866
		local_irq_disable();
2867
		legacy_pic->mask(0);
2868
		clear_IO_APIC_pin(apic2, pin2);
2869
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
L
Linus Torvalds 已提交
2870 2871 2872
	}

	if (nmi_watchdog == NMI_IO_APIC) {
2873 2874
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
2875
		nmi_watchdog = NMI_NONE;
L
Linus Torvalds 已提交
2876
	}
2877
#ifdef CONFIG_X86_32
2878
	timer_ack = 0;
2879
#endif
L
Linus Torvalds 已提交
2880

2881 2882
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");
L
Linus Torvalds 已提交
2883

2884
	lapic_register_intr(0);
2885
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
2886
	legacy_pic->unmask(0);
L
Linus Torvalds 已提交
2887 2888

	if (timer_irq_works()) {
2889
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2890
		goto out;
L
Linus Torvalds 已提交
2891
	}
Y
Yinghai Lu 已提交
2892
	local_irq_disable();
2893
	legacy_pic->mask(0);
2894
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2895
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
L
Linus Torvalds 已提交
2896

2897 2898
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");
L
Linus Torvalds 已提交
2899

2900 2901
	legacy_pic->init(0);
	legacy_pic->make_irq(0);
2902
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
L
Linus Torvalds 已提交
2903 2904 2905 2906

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
2907
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2908
		goto out;
L
Linus Torvalds 已提交
2909
	}
Y
Yinghai Lu 已提交
2910
	local_irq_disable();
2911
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
L
Linus Torvalds 已提交
2912
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
2913
		"report.  Then try booting with the 'noapic' option.\n");
2914 2915
out:
	local_irq_restore(flags);
L
Linus Torvalds 已提交
2916 2917 2918
}

/*
2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
L
Linus Torvalds 已提交
2934
 */
2935
#define PIC_IRQS	(1UL << PIC_CASCADE_IR)
L
Linus Torvalds 已提交
2936 2937 2938

void __init setup_IO_APIC(void)
{
2939 2940 2941 2942

	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
2943
	io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
L
Linus Torvalds 已提交
2944

2945
	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
T
Thomas Gleixner 已提交
2946
	/*
	 * Set up IO-APIC IRQ routing.
	 */
2949 2950
	x86_init.mpparse.setup_ioapic_ids();

L
Linus Torvalds 已提交
2951 2952 2953
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
2954
	if (legacy_pic->nr_legacy_irqs)
2955
		check_timer();
L
Linus Torvalds 已提交
2956 2957 2958
}

/*
2959 2960
 *      Called after all the initialization is done. If we didn't find any
 *      APIC bugs then we can allow the modify fast path
L
Linus Torvalds 已提交
2961
 */
2962

L
Linus Torvalds 已提交
2963 2964
static int __init io_apic_bug_finalize(void)
{
T
Thomas Gleixner 已提交
2965 2966 2967
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
L
Linus Torvalds 已提交
2968 2969 2970 2971 2972 2973 2974 2975
}

late_initcall(io_apic_bug_finalize);

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
2976
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
L
Linus Torvalds 已提交
2977

2978
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
L
Linus Torvalds 已提交
2979 2980 2981 2982
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;
2983

L
Linus Torvalds 已提交
2984 2985
	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
2986 2987
	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
		*entry = ioapic_read_entry(dev->id, i);
L
Linus Torvalds 已提交
2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;
2999

L
Linus Torvalds 已提交
3000 3001 3002
	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

3003
	raw_spin_lock_irqsave(&ioapic_lock, flags);
L
Linus Torvalds 已提交
3004
	reg_00.raw = io_apic_read(dev->id, 0);
3005 3006
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
L
Linus Torvalds 已提交
3007 3008
		io_apic_write(dev->id, 0, reg_00.raw);
	}
3009
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3010
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3011
		ioapic_write_entry(dev->id, i, entry[i]);
L
Linus Torvalds 已提交
3012 3013 3014 3015 3016

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
3017
	.name = "ioapic",
L
Linus Torvalds 已提交
3018 3019 3020 3021 3022 3023
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
3024 3025
	struct sys_device * dev;
	int i, size, error;
L
Linus Torvalds 已提交
3026 3027 3028 3029 3030

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

3031
	for (i = 0; i < nr_ioapics; i++ ) {
3032
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
L
Linus Torvalds 已提交
3033
			* sizeof(struct IO_APIC_route_entry);
3034
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
L
Linus Torvalds 已提交
3035 3036 3037 3038 3039
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
3040
		dev->id = i;
L
Linus Torvalds 已提交
3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

3056
/*
3057
 * Dynamic irq allocation and deallocation
3058
 */
3059
unsigned int create_irq_nr(unsigned int from, int node)
3060
{
3061
	struct irq_cfg *cfg;
3062
	unsigned long flags;
3063 3064
	unsigned int ret = 0;
	int irq;
3065

3066 3067
	if (from < nr_irqs_gsi)
		from = nr_irqs_gsi;
3068

3069 3070 3071 3072 3073 3074 3075
	irq = alloc_irq_from(from, node);
	if (irq < 0)
		return 0;
	cfg = alloc_irq_cfg(irq, node);
	if (!cfg) {
		free_irq_at(irq, NULL);
		return 0;
3076
	}
3077

3078 3079 3080 3081
	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
		ret = irq;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
3082

3083 3084 3085 3086 3087 3088 3089
	if (ret) {
		set_irq_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		free_irq_at(irq, cfg);
	}
	return ret;
3090 3091
}

Y
Yinghai Lu 已提交
3092 3093
int create_irq(void)
{
3094
	int node = cpu_to_node(0);
3095
	unsigned int irq_want;
3096 3097
	int irq;

3098
	irq_want = nr_irqs_gsi;
3099
	irq = create_irq_nr(irq_want, node);
3100 3101 3102 3103 3104

	if (irq == 0)
		irq = -1;

	return irq;
Y
Yinghai Lu 已提交
3105 3106
}

3107 3108
void destroy_irq(unsigned int irq)
{
3109
	struct irq_cfg *cfg = get_irq_chip_data(irq);
3110 3111
	unsigned long flags;

3112
	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3113

3114
	free_irte(irq);
3115
	raw_spin_lock_irqsave(&vector_lock, flags);
3116
	__clear_irq_vector(irq, cfg);
3117
	raw_spin_unlock_irqrestore(&vector_lock, flags);
3118
	free_irq_at(irq, cfg);
3119 3120
}

3121
/*
S
Simon Arlott 已提交
3122
 * MSI message composition
3123 3124
 */
#ifdef CONFIG_PCI_MSI
3125 3126
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
3127
{
3128 3129
	struct irq_cfg *cfg;
	int err;
3130 3131
	unsigned dest;

J
Jan Beulich 已提交
3132 3133 3134
	if (disable_apic)
		return -ENXIO;

Y
Yinghai Lu 已提交
3135
	cfg = irq_cfg(irq);
3136
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
3137 3138
	if (err)
		return err;
3139

3140
	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3141

3142
	if (irq_remapped(get_irq_chip_data(irq))) {
3143 3144 3145 3146 3147 3148 3149
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

3150
		prepare_irte(&irte, cfg->vector, dest);
3151

3152
		/* Set source-id of interrupt request */
3153 3154 3155 3156
		if (pdev)
			set_msi_sid(&irte, pdev);
		else
			set_hpet_sid(&irte, hpet_id);
3157

3158 3159 3160 3161 3162 3163 3164 3165
		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
3166
	} else {
3167 3168 3169 3170 3171 3172
		if (x2apic_enabled())
			msg->address_hi = MSI_ADDR_BASE_HI |
					  MSI_ADDR_EXT_DEST_ID(dest);
		else
			msg->address_hi = MSI_ADDR_BASE_HI;

3173 3174
		msg->address_lo =
			MSI_ADDR_BASE_LO |
3175
			((apic->irq_dest_mode == 0) ?
3176 3177
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
3178
			((apic->irq_delivery_mode != dest_LowestPrio) ?
3179 3180 3181
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);
3182

3183 3184 3185
		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
3186
			((apic->irq_delivery_mode != dest_LowestPrio) ?
3187 3188 3189 3190
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
3191
	return err;
3192 3193
}

3194
#ifdef CONFIG_SMP
3195 3196
static int
msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3197
{
3198
	struct irq_cfg *cfg = data->chip_data;
3199 3200 3201
	struct msi_msg msg;
	unsigned int dest;

3202
	if (__ioapic_set_affinity(data, mask, &dest))
3203
		return -1;
3204

3205
	__get_cached_msi_msg(data->msi_desc, &msg);
3206 3207

	msg.data &= ~MSI_DATA_VECTOR_MASK;
3208
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
3209 3210 3211
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

3212
	__write_msi_msg(data->msi_desc, &msg);
3213 3214

	return 0;
3215
}
3216 3217 3218 3219 3220
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
3221
static int
3222 3223
ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
3224
{
3225 3226
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
3227 3228 3229
	struct irte irte;

	if (get_irte(irq, &irte))
3230
		return -1;
3231

3232
	if (__ioapic_set_affinity(data, mask, &dest))
3233
		return -1;
3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
3248 3249
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
3250 3251

	return 0;
3252
}
Y
Yinghai Lu 已提交
3253

3254
#endif
3255
#endif /* CONFIG_SMP */
3256

3257 3258 3259 3260 3261
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
3262 3263 3264 3265
	.name			= "PCI-MSI",
	.irq_unmask		= unmask_msi_irq,
	.irq_mask		= mask_msi_irq,
	.irq_ack		= ack_apic_edge,
3266
#ifdef CONFIG_SMP
3267
	.irq_set_affinity	= msi_set_affinity,
3268
#endif
3269
	.irq_retrigger		= ioapic_retrigger_irq,
3270 3271
};

3272
static struct irq_chip msi_ir_chip = {
3273 3274 3275
	.name			= "IR-PCI-MSI",
	.irq_unmask		= unmask_msi_irq,
	.irq_mask		= mask_msi_irq,
3276
#ifdef CONFIG_INTR_REMAP
3277
	.irq_ack		= ir_ack_apic_edge,
3278
#ifdef CONFIG_SMP
3279
	.irq_set_affinity	= ir_msi_set_affinity,
3280
#endif
3281
#endif
3282
	.irq_retrigger		= ioapic_retrigger_irq,
3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
T
Thomas Gleixner 已提交
3306
		       pci_name(dev));
3307 3308 3309 3310
		return -ENOSPC;
	}
	return index;
}
3311

Y
Yinghai Lu 已提交
3312
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3313 3314
{
	struct msi_msg msg;
3315
	int ret;
3316

3317
	ret = msi_compose_msg(dev, irq, &msg, -1);
3318 3319 3320
	if (ret < 0)
		return ret;

Y
Yinghai Lu 已提交
3321
	set_irq_msi(irq, msidesc);
3322 3323
	write_msi_msg(irq, &msg);

3324
	if (irq_remapped(get_irq_chip_data(irq))) {
3325
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3326 3327 3328
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3329

Y
Yinghai Lu 已提交
3330 3331
	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

3332 3333 3334
	return 0;
}

3335 3336
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
3337 3338
	int node, ret, sub_handle, index = 0;
	unsigned int irq, irq_want;
3339
	struct msi_desc *msidesc;
3340
	struct intel_iommu *iommu = NULL;
3341

3342 3343 3344 3345
	/* x86 doesn't support multiple MSI yet */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

3346
	node = dev_to_node(&dev->dev);
3347
	irq_want = nr_irqs_gsi;
3348
	sub_handle = 0;
3349
	list_for_each_entry(msidesc, &dev->msi_list, list) {
3350
		irq = create_irq_nr(irq_want, node);
3351 3352
		if (irq == 0)
			return -1;
Y
Yinghai Lu 已提交
3353
		irq_want = irq + 1;
3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
3381
		ret = setup_msi_irq(dev, msidesc, irq);
3382 3383 3384 3385 3386
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;
3387 3388

error:
3389 3390
	destroy_irq(irq);
	return ret;
3391 3392
}

3393 3394
void arch_teardown_msi_irq(unsigned int irq)
{
3395
	destroy_irq(irq);
3396 3397
}

3398
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3399
#ifdef CONFIG_SMP
3400 3401 3402
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
3403
{
3404 3405
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
3406 3407
	struct msi_msg msg;

3408
	if (__ioapic_set_affinity(data, mask, &dest))
3409
		return -1;
3410 3411 3412 3413 3414 3415 3416 3417 3418

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
3419 3420

	return 0;
3421
}
Y
Yinghai Lu 已提交
3422

3423 3424
#endif /* CONFIG_SMP */

3425
static struct irq_chip dmar_msi_type = {
3426 3427 3428 3429
	.name			= "DMAR_MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= ack_apic_edge,
3430
#ifdef CONFIG_SMP
3431
	.irq_set_affinity	= dmar_msi_set_affinity,
3432
#endif
3433
	.irq_retrigger		= ioapic_retrigger_irq,
3434 3435 3436 3437 3438 3439
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;
3440

3441
	ret = msi_compose_msg(NULL, irq, &msg, -1);
3442 3443 3444 3445 3446 3447 3448 3449 3450
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif

3451 3452 3453
#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
3454 3455
static int hpet_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
3456
{
3457
	struct irq_cfg *cfg = data->chip_data;
3458 3459 3460
	struct msi_msg msg;
	unsigned int dest;

3461
	if (__ioapic_set_affinity(data, mask, &dest))
3462
		return -1;
3463

3464
	hpet_msi_read(data->handler_data, &msg);
3465 3466 3467 3468 3469 3470

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

3471
	hpet_msi_write(data->handler_data, &msg);
3472 3473

	return 0;
3474
}
Y
Yinghai Lu 已提交
3475

3476 3477
#endif /* CONFIG_SMP */

3478
static struct irq_chip ir_hpet_msi_type = {
3479 3480 3481
	.name			= "IR-HPET_MSI",
	.irq_unmask		= hpet_msi_unmask,
	.irq_mask		= hpet_msi_mask,
3482
#ifdef CONFIG_INTR_REMAP
3483
	.irq_ack		= ir_ack_apic_edge,
3484
#ifdef CONFIG_SMP
3485
	.irq_set_affinity	= ir_msi_set_affinity,
3486 3487
#endif
#endif
3488
	.irq_retrigger		= ioapic_retrigger_irq,
3489 3490
};

3491
static struct irq_chip hpet_msi_type = {
3492
	.name = "HPET_MSI",
3493 3494
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
3495
	.irq_ack = ack_apic_edge,
3496
#ifdef CONFIG_SMP
3497
	.irq_set_affinity = hpet_msi_set_affinity,
3498
#endif
3499
	.irq_retrigger = ioapic_retrigger_irq,
3500 3501
};

3502
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3503 3504
{
	struct msi_msg msg;
3505
	int ret;
3506

3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_hpet_to_ir(id);
		int index;

		if (!iommu)
			return -1;

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			return -1;
	}

	ret = msi_compose_msg(NULL, irq, &msg, id);
3520 3521 3522
	if (ret < 0)
		return ret;

3523
	hpet_msi_write(get_irq_data(irq), &msg);
3524
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3525
	if (irq_remapped(get_irq_chip_data(irq)))
3526 3527 3528 3529 3530
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");
Y
Yinghai Lu 已提交
3531

3532 3533 3534 3535
	return 0;
}
#endif

3536
#endif /* CONFIG_PCI_MSI */
3537 3538 3539 3540 3541 3542 3543
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

3544
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3545
{
3546 3547
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);
3548

3549
	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3550
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3551

3552
	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3553
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3554

3555
	write_ht_irq_msg(irq, &msg);
3556 3557
}

3558 3559
static int
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	target_ht_irq(data->irq, dest, cfg->vector);
	return 0;
}

#endif

static struct irq_chip ht_irq_chip = {
	.name			= "PCI-HT",
	.irq_mask		= mask_ht_irq,
	.irq_unmask		= unmask_ht_irq,
	.irq_ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ht_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

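		/*
		 * Route the interrupt to the allowed CPUs and build the HT
		 * message for the freshly assigned vector; the interrupt is
		 * created masked.
		 */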
		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

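/* Number of redirection table entries provided by this IO-APIC. */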
int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* The register returns the maximum redirection entry index
	 * supported, which is one less than the total number of redir
	 * entries.
	 */
	return reg_01.bits.entries + 1;
}

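/* Make sure nr_irqs_gsi covers every GSI plus the legacy ISA IRQ range. */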
void __init probe_nr_irqs_gsi(void)
{
	int nr;

	nr = gsi_top + NR_IRQS_LEGACY;
	if (nr > nr_irqs_gsi)
		nr_irqs_gsi = nr;

	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}

#ifdef CONFIG_SPARSE_IRQ
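/*
 * Upper bound for the number of irq descriptors: clamp nr_irqs to the
 * vector space, then make room for every GSI, a few dynamic irqs per CPU
 * and, when MSI/HT is configured, extra slots for dynamically created
 * interrupts.
 */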
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return NR_IRQS_LEGACY;
}
#endif

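/*
 * Program a single IO-APIC pin for @irq: allocate the irq_cfg on the
 * node of the requesting device, record the ioapic/pin mapping and write
 * the redirection entry with the requested trigger mode and polarity.
 */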
static int __io_apic_set_pci_routing(struct device *dev, int irq,
				struct io_apic_irq_attr *irq_attr)
{
	struct irq_cfg *cfg;
	int node;
	int ioapic, pin;
	int trigger, polarity;

	ioapic = irq_attr->ioapic;
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	if (dev)
		node = dev_to_node(dev);
	else
		node = cpu_to_node(0);

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return 0;

	pin = irq_attr->ioapic_pin;
	trigger = irq_attr->trigger;
	polarity = irq_attr->polarity;

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= legacy_pic->nr_legacy_irqs) {
		if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
			printk(KERN_INFO "cannot add pin %d for irq %d\n",
				pin, irq);
			return 0;
		}
	}

	setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);

	return 0;
}

int io_apic_set_pci_routing(struct device *dev, int irq,
				struct io_apic_irq_attr *irq_attr)
{
	int ioapic, pin;
	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}

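/*
 * Pick an APIC id for a new IO-APIC: on 32-bit pre-xAPIC Intel systems
 * the IO-APIC itself is asked via io_apic_get_unique_id(); on 64-bit the
 * requested id is used unless it is already taken, in which case the
 * first unused id is returned.
 */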
u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);
	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
}

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
#endif
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}

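/*
 * Look up the trigger mode and polarity that the MP/ACPI interrupt
 * source entries specify for the interrupt behind @gsi.
 */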
int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
	int ioapic, pin, idx;

	if (skip_ioapic_setup)
		return -1;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;

	idx = find_irq_entry(ioapic, pin, mp_INT);
	if (idx < 0)
		return -1;

	*trigger = irq_trigger(idx);
	*polarity = irq_polarity(idx);
	return 0;
}

/*
 * This function currently is only a helper for the i386 SMP boot process
 * where we need to reprogram the ioredtbls to cater for the CPUs which
 * have come online, so the mask in all cases should simply be
 * apic->target_cpus().
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
	for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;
		irq = pin_2_irq(irq_entry, ioapic, pin);

		if ((ioapic > 0) && (irq > 16))
			continue;

		desc = irq_to_desc(irq);

		/*
		 * Honour affinities which have been set in early boot
		 */
		if (desc->status &
		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
			mask = desc->irq_data.affinity;
		else
			mask = apic->target_cpus();

		if (intr_remapping_enabled)
			ir_ioapic_set_affinity(&desc->irq_data, mask, false);
		else
			ioapic_set_affinity(&desc->irq_data, mask, false);
	}

}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

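/*
 * Allocate one struct resource per IO-APIC (plus storage for its name)
 * from bootmem so that ioapic_insert_resources() can later claim the
 * register windows in the iomem resource tree.
 */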
static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	mem += sizeof(struct resource) * nr_ioapics;

	for (i = 0; i < nr_ioapics; i++) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
	}

	ioapic_resources = res;

	return res;
}

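/*
 * Map the register window of every IO-APIC into the fixmap area and
 * record the physical ranges in the resources set up above; when no MP
 * configuration is present a dummy page is mapped instead.
 */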
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources(nr_ioapics);
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}

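/* Claim the IO-APIC register windows in the iomem resource tree. */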
void __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		if (nr_ioapics > 0)
			printk(KERN_ERR
				"IO APIC resources couldn't be allocated.\n");
		return;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}
}

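/* Return the index of the IO-APIC that handles @gsi, or -1 if there is none. */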
int mp_find_ioapic(u32 gsi)
4014 4015 4016 4017 4018 4019 4020 4021 4022
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_gsi_routing[i].gsi_base)
		    && (gsi <= mp_gsi_routing[i].gsi_end))
			return i;
	}
	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}

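/* Translate @gsi into a pin number relative to @ioapic's GSI base. */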
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	if (WARN_ON(ioapic == -1))
		return -1;
	if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
		return -1;

	return gsi - mp_gsi_routing[ioapic].gsi_base;
}

static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
		       " found in table, skipping!\n");
		return 1;
	}
	return 0;
}

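/*
 * Register a newly discovered IO-APIC: map its registers, assign a
 * unique APIC id, read its version and record the GSI range it serves.
 */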
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;
	int entries;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].type = MP_IOAPIC;
	mp_ioapics[idx].flags = MPC_APIC_USABLE;
	mp_ioapics[idx].apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].apicid = io_apic_unique_id(id);
	mp_ioapics[idx].apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	entries = io_apic_get_redir_entries(idx);
	mp_gsi_routing[idx].gsi_base = gsi_base;
	mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	nr_ioapic_registers[idx] = entries;

	if (mp_gsi_routing[idx].gsi_end >= gsi_top)
		gsi_top = mp_gsi_routing[idx].gsi_end + 1;

	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
	       mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

	nr_ioapics++;
}

/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
	struct irq_cfg *cfg;

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
#endif
	/* Make sure the irq descriptor is set up */
	cfg = alloc_irq_and_cfg_at(0, 0);

	setup_local_APIC();

	add_pin_to_irq_node(cfg, 0, 0, 0);
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
}