#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in single-threaded environment with interrupts disabled, so no need to take
 * the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

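/*
 * Read back the IRTE currently programmed for @irq.  The lookup of the
 * per-IRQ index is done under irq_2_ir_lock so it cannot race with
 * allocation or teardown.
 */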
static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	if (unlikely(!irq_iommu->iommu)) {
		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

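/*
 * Allocate @count consecutive IRTEs for @irq in @iommu's remapping table.
 * @count is rounded up to a power of two and recorded as a mask in
 * irq_2_iommu so the whole block can be released again in one go.
 */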
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index =  index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

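/*
 * Invalidate the Interrupt Entry Cache for the IRTEs starting at @index
 * and wait for the queued invalidation to complete.
 */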
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

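/*
 * Atomically rewrite the IRTE backing @irq and flush the interrupt entry
 * cache so the hardware picks up the new contents.
 */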
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

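/* Find the remapping hardware unit serving the given HPET block. */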
static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

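/*
 * Release the IRTE(s) behind @irq: clear the hardware entries, return the
 * table slots to the allocator and reset the per-IRQ bookkeeping.
 */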
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias.  The only case
 * where it will return an alias on a different bus than the
	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
	 * the subordinate bus.  In this case we can only verify the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}

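/*
 * Point the IOMMU at the remapping table and switch interrupt remapping
 * on, keeping compatibility-format interrupts blocked.
 */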
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

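/*
 * Allocate the per-IOMMU interrupt remapping table along with the bitmap
 * used to track which IRTEs are in use.
 */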
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
	return 0;

out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);
	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* First check whether IRQ remapping should be enabled */
	if (disable_irq_remap)
		return -ENODEV;

	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Do the allocations early */
	for_each_iommu(iommu, drhd)
		if (intel_setup_irq_remapping(iommu))
			goto error;

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

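/*
 * Enable interrupt remapping on all IOMMUs: decide between xapic and
 * x2apic (EIM) mode, enable queued invalidation and then switch each
 * remapping unit over.
 */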
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int setup = 0;
	int eim = 0;

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_iommu(iommu, drhd)
		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			eim = 0;
		}
	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base  0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

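/*
 * Walk the device scope entries of a DRHD and record which IO-APICs and
 * HPET blocks sit behind this remapping unit.
 */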
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware unit.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

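/*
 * Fill in an IRTE for the given vector and destination APIC ID; the
 * trigger mode is always programmed as edge, see the comment below.
 */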
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	*/
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

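/*
 * Build the remapping-format RTE for an IO-APIC pin: allocate an IRTE,
 * program it with the destination and vector, and encode the IRTE index
 * into the IO-APIC route entry.
 */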
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

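/*
 * Compose a remappable MSI message: look up the IRTE handle of @irq,
 * refresh the IRTE with the current vector and destination, and encode
 * the handle into the MSI address/data pair.
 */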
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_cfg(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			printk(KERN_ERR
			       "Unable to allocate %d IRTE for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}

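/*
 * Bind @irq to an already allocated IRTE block, identified by @index and
 * @sub_handle, on the IOMMU that covers @pdev.
 */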
static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * setup the mapping between the irq and the IRTE
		 * base index, the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

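/*
 * Allocate a single IRTE for an HPET MSI on the IOMMU covering the given
 * HPET block.
 */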
static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.alloc_hpet_msi		= intel_alloc_hpet_msi,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("DRHD %Lx: failed to allocate resource\n",
		       iommu->reg_phys);
		ir_remove_ioapic_hpet_scope(iommu);
		return ret;
	}

	if (!iommu->qi) {
		/* Clear previous faults. */
		dmar_fault(-1, iommu);
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* Enable queued invalidation */
	ret = dmar_enable_qi(iommu);
	if (!ret) {
		iommu_set_irq_remapping(iommu, eim);
	} else {
		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
		       iommu->reg_phys, iommu->ecap, ret);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}

	return ret;
}

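/*
 * Handle hot-add and hot-removal of an interrupt remapping unit.  A unit
 * can only be removed once all of its IRTEs have been freed.
 */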
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}