/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct apic_chip_data {
	struct irq_cfg		cfg;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	struct hlist_node	clist;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_SMP
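/* Per-CPU list of apic_chip_data whose old vector awaits cleanup here. */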
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the online set of cpus does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

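	/* Walk down to the root (vector) domain, which owns the chip data. */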
	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (!apicd)
		return NULL;
	if (!zalloc_cpumask_var_node(&apicd->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&apicd->old_domain, GFP_KERNEL, node))
		goto out_domain;
	INIT_HLIST_NODE(&apicd->clist);
	return apicd;
out_domain:
	free_cpumask_var(apicd->domain);
out_data:
	kfree(apicd);
	return NULL;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	if (apicd) {
		free_cpumask_var(apicd->domain);
		free_cpumask_var(apicd->old_domain);
		kfree(apicd);
	}
}

static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask,
			       struct irq_data *irqd)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
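	/*
	 * The priority level is vector >> 4, so e.g. vectors 0x61 and 0x62
	 * both sit in level 6. Stepping by 16 below therefore puts each
	 * consecutive allocation into the next priority level.
	 */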
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->cfg.old_vector)
		return -EBUSY;

	/* Only try to allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		cpumask_copy(vector_cpumask, cpumask_of(cpu));

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, system_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it is overwritten by the
		 * cpumask_copy() at the top of the next loop iteration.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
	d->prev_cpu = d->cpu;
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 *
	 * vector_searchmask is a subset of d->domain and has the offline
	 * cpus masked out.
	 */
	cpumask_and(vector_searchmask, vector_searchmask, mask);
	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqd,
					&d->cfg.dest_apicid));
	d->cpu = cpumask_first(vector_searchmask);
	return 0;
}

static int assign_irq_vector(int irq, struct apic_chip_data *apicd,
240
			     const struct cpumask *mask,
241
			     struct irq_data *irqd)
242 243 244 245 246
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, apicd, mask, irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *apicd,
				    struct irq_alloc_info *info,
				    struct irq_data *irqd)
{
	if (info && info->mask)
		return assign_irq_vector(irq, apicd, info->mask, irqd);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, apicd, cpumask_of_node(node), irqd) == 0)
		return 0;
	return assign_irq_vector(irq, apicd, cpu_online_mask, irqd);
}

static void clear_irq_vector(int irq, struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->cfg.vector;

	if (!vector)
		return;

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	apicd->cfg.vector = 0;

	/* Clean up move in progress */
	vector = apicd->cfg.old_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irqd->chip_data);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Make sure, that the legacy to IOAPIC transition stays on
		 * the same vector. This is required for check_timer() to
		 * work correctly as it might switch back to legacy mode.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			apicd->cfg.vector = ISA_IRQ_VECTOR(virq + i);
			apicd->cpu = 0;
			cpumask_copy(apicd->domain, cpumask_of(0));
		}

		err = assign_irq_vector_policy(virq + i, node, apicd, info,
					       irqd);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

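	/* Baseline: every GSI and legacy IRQ, plus 8 dynamic IRQs per CPU. */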
	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr +=  8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}

/* Temporary hack to keep things working */
static void vector_update_shutdown_irqs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc) {
		struct irq_data *irqd = irq_desc_get_irq_data(desc);
		struct apic_chip_data *ad = apic_chip_data(irqd);

		if (ad && ad->cfg.vector && ad->cpu == smp_processor_id())
			this_cpu_write(vector_irq[ad->cfg.vector], desc);
	}
}

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);
	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));

	/*
	 * Until the rewrite of the managed interrupt management is in
	 * place it's necessary to walk the irq descriptors and check for
	 * interrupts which are targeted at this CPU.
	 */
	vector_update_shutdown_irqs();
}

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	irq_move_irq(irqd);
	ack_APIC_irq();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = irqd->chip_data;
	int err, irq = irqd->irq;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, apicd, dest, irqd);
	return err ? err : IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->cfg.old_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered in the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
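		/* The IRR is 256 bits wide: eight 32-bit registers 0x10 apart. */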
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		hlist_del_init(&apicd->clist);
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		apicd->cfg.old_vector = 0;
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
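		/* Previous target CPU is offline; nothing to clean up there. */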
		apicd->cfg.old_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->cfg.vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

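/*
 * The interrupt entry code stores the negated vector number in
 * pt_regs->orig_ax, so ~orig_ax recovers the vector this interrupt
 * arrived on.
 */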
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If old_vector is empty, no action required.
	 */
	vector = apicd->cfg.old_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus is delayed beyond
		 * the point where the target cpu disables interrupts in stop
		 * machine. I doubt that it can happen, but at least there is
		 * a theoretical chance. Virtualization might be able to
		 * expose this, but AFAICT the IOAPIC emulation is not as
		 * stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	/* Clean up the leftovers of the (half finished) move */
	cpumask_clear(apicd->old_domain);
	apicd->cfg.old_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
unlock:
	raw_spin_unlock(&vector_lock);
}
#endif

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

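	/* OCW3 0x0b selects ISR reads; 0x0a restores the default IRR reads. */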
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);