/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

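/*
 * Per-interrupt state for the vector domain:
 * @cfg:		assigned vector and cached destination APIC id
 * @domain:		cpus on which @cfg.vector is installed
 * @old_domain:		cpus which still reference a previous vector and
 *			await the cleanup IPI
 * @move_in_progress:	a vector move is pending cleanup
 */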
struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef	CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif

void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}

static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}

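/*
 * Search for a free vector for @irq usable on the cpus in @mask. Walks
 * the online cpus in @mask, asks the apic driver for each candidate's
 * vector allocation domain and probes vectors in steps of 16 so that
 * consecutive allocations land in different priority levels. On success
 * the new domain is installed and any cpus which dropped out of the
 * mask are queued in @d->old_domain for cleanup. Must be called with
 * vector_lock held.
 */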
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
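	/*
	 * Illustrative walk, assuming FIRST_EXTERNAL_VECTOR is 0x20: a
	 * search starting at vector 0x31 (offset 1) probes 0x41, 0x51, ...
	 * and, once first_system_vector is reached, wraps to 0x22 with the
	 * offset bumped to 2. Each step of 16 changes the priority level
	 * (vector >> 4) of the candidate.
	 */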
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid_and() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we have already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 *
	 * vector_searchmask is a subset of d->domain and has the offline
	 * cpus masked out.
	 */
	BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
					    &d->cfg.dest_apicid));
	return 0;
}

static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

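/*
 * Pick a target cpumask for @irq: an explicit mask supplied by the
 * caller via @info, else the cpus of the irq's NUMA @node, else the
 * default apic->target_cpus() set.
 */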
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}

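/*
 * Release the vector of @irq on all online cpus in its domain and, if a
 * move was left unfinished, purge the stale references from the old
 * domain's vector tables.
 */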
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	if (!data->cfg.vector)
		return;

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/*
	 * If move is in progress or the old_domain mask is not empty,
	 * i.e. the cleanup IPI has not been processed yet, we need to remove
	 * the old references to desc from all cpus' vector tables.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

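/*
 * Undo x86_vector_alloc_irqs(): release the vector and the chip data of
 * every interrupt in [@virq, @virq + @nr_irqs) and clear any
 * legacy_irq_data references.
 */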
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef	CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}

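/*
 * irqdomain allocation callback: set up chip data and assign a vector
 * for every interrupt in [@virq, @virq + @nr_irqs). Legacy irqs reuse
 * the chip data preallocated by init_legacy_irqs().
 */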
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef	CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};

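/*
 * Size the irq space: cap nr_irqs at NR_VECTORS * nr_cpu_ids and
 * estimate the demand from the GSI range plus per-cpu headroom for
 * dynamically allocated interrupts. The PIC probe supplies the final
 * number of legacy irqs.
 */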
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * For MSI and HT dynamic irqs.
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr +=  8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

#ifdef	CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif

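/*
 * Create the core "VECTOR" irqdomain, make it the default host, stack
 * the MSI and HT irqdomains on top of it and allocate the helper
 * cpumasks used by the vector search.
 */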
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	init_legacy_irqs();

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}

/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}

/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most platforms the legacy PIC delivers interrupts to the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}

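/* Resend the irq by sending an IPI to one online cpu in its domain. */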
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

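/*
 * Ack an edge interrupt: finish a pending vector move for this irq,
 * perform any deferred migration and ack the local APIC.
 */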
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	return err ? err : IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP
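/*
 * Send the cleanup IPI to all online cpus which still reference the old
 * vector, so they can release their vector_irq entries.
 */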
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	raw_spin_lock(&vector_lock);
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	data->move_in_progress = 0;
	if (!cpumask_empty(data->old_domain))
		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}

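/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: scan this cpu's vector_irq table
 * and release the vectors whose move has completed. If a vector is
 * still pending in the IRR, the cleanup is deferred by re-sending the
 * IPI to ourselves.
 */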
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Nothing to clean up if irq migration is in progress
		 * or this cpu is not set in the cleanup mask.
		 */
		if (data->move_in_progress ||
		    !cpumask_test_cpu(me, data->old_domain))
			goto unlock;

		/*
		 * We have two cases to handle here:
		 * 1) vector is unchanged but the target mask got reduced
		 * 2) vector and the target mask has changed
		 *
		 * #1 is obvious, but in #2 we have two vectors with the same
		 * irq descriptor: the old and the new vector. So we need to
		 * make sure that we only cleanup the old vector. The new
		 * vector has the current @vector number in the config and
		 * this cpu is part of the target mask. We better leave that
		 * one alone.
		 */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr  & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		cpumask_clear_cpu(me, data->old_domain);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}

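/*
 * Called when an interrupt arrives on its new vector: if this cpu is
 * part of the new domain, the move has completed and the cleanup IPI
 * can be sent.
 */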
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct irq_data *irqdata;
	struct apic_chip_data *data;
	struct irq_cfg *cfg;
	unsigned int cpu;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqdata = irq_domain_get_irq_data(x86_vector_domain,
					  irq_desc_get_irq(desc));
	if (!irqdata)
		return;

	data = apic_chip_data(irqdata);
	cfg = data ? &data->cfg : NULL;

	if (!cfg)
		return;

	/*
	 * This is tricky. If the cleanup of @data->old_domain has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 */
	raw_spin_lock(&vector_lock);
	/*
	 * Clean out all offline cpus (including the outgoing one) from the
	 * old_domain mask.
	 */
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);

	/*
	 * If move_in_progress is cleared and the old_domain mask is empty,
	 * then there is nothing to clean up. fixup_irqs() will take care of
	 * the stale vectors on the outgoing cpu.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
		raw_spin_unlock(&vector_lock);
		return;
	}

	/*
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (data->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non-issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqdata->irq, cfg->old_vector);
	}
	/*
	 * If old_domain is not empty, then other cpus still have the irq
	 * descriptor set in their vector array. Clean it up.
	 */
	for_each_cpu(cpu, data->old_domain)
		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;

	/* Clean up the leftovers of the (half-finished) move */
	cpumask_clear(data->old_domain);
	data->move_in_progress = 0;
	raw_spin_unlock(&vector_lock);
}
#endif

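/* Dump a 256-bit APIC register field (ISR/TMR/IRR): eight 32-bit words. */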
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

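/*
 * Dump the i8259 PIC state: IMR and IRR are read directly, ISR via an
 * OCW3 read-register command, plus the edge/level control registers.
 */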
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

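/*
 * "show_lapic=" boot option: limit how many cpus' local APICs are
 * dumped; "show_lapic=all" covers every configured cpu.
 */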
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);