#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in single-threaded environment with interrupt disabled, so no need to take
 * the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	if (unlikely(!irq_iommu->iommu)) {
		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
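
/*
 * For multi-MSI, alloc_irte() reserves a power-of-two block of IRTEs and
 * records its order in irte_mask: a request for 3 vectors, for example, is
 * rounded up to 4 (mask = 2).  Each vector is then addressed as
 * irte_index + sub_handle via set_irte_irq()/map_irq_to_irte_handle().
 */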

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
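
/*
 * An IRTE is a 128-bit entry; modify_irte() rewrites it as two atomic
 * 64-bit stores and then invalidates the interrupt entry cache through
 * qi_flush_iec() so the hardware picks up the new route.  This is the
 * primitive that makes irq migration a simple table update (see
 * intel_ioapic_set_affinity() below).
 */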

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
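
/*
 * Note that only the owner of the base index (sub_handle == 0) clears and
 * releases the whole IRTE block; irqs that merely share the block through
 * a sub_handle leave the entries alone.
 */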

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the least three significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
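
/*
 * The source-id is the PCI requester id, (bus << 8) | devfn.  As an
 * illustrative example, a device at 00:1f.0 would yield sid 0x00f8,
 * since PCI_DEVFN(0x1f, 0) == 0xf8.
 */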

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias.  The only case
	 * where it will return an alias on a different bus than the
	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
	 * the subordinate bus.  In this case we can only verify the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}
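
/*
 * In the SVT_VERIFY_BUS case above, the sid field does not hold a
 * requester id at all: per the VT-d spec, bits 15:8 and 7:0 carry a
 * start-bus/end-bus pair, so PCI_DEVID(PCI_BUS_NUM(data.alias),
 * dev->bus->number) asks the hardware to accept requests from any
 * device in that bus range.
 */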

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
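
/*
 * DMAR_IRTA_REG layout, per the VT-d spec: the upper bits hold the
 * table's physical address, bit 11 (set by IR_X2APIC_MODE) selects the
 * extended interrupt mode (x2apic, 32-bit destination ids), and bits 3:0
 * encode the table size as 2^(size+1) entries -- INTR_REMAP_TABLE_REG_SIZE
 * is 0xf, giving the 65536 entries of INTR_REMAP_TABLE_ENTRIES.
 */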


static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
		kfree(ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}
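
/*
 * Sizing check (assuming 4 KiB pages): 65536 entries * 16 bytes per IRTE
 * = 1 MiB = 2^8 pages, which is what INTR_REMAP_PAGE_ORDER allocates.
 */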

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (disable_irq_remap)
		return 0;
	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return 0;
	}

	if (!dmar_ir_support())
		return 0;

	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			return 0;

	return 1;
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */

	if (x2apic_present)
		pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");

	return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}
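
/*
 * The loop above walks the ACPI device-scope path one bridge at a time:
 * starting from scope->bus, each hop reads the bridge's secondary bus
 * number until only the final path entry -- the IOAPIC/HPET itself --
 * remains.  With a single-entry path, --count is already 0 and no config
 * reads are done.
 */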

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
795 796 797 798 799 800 801 802 803 804 805
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware unit.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
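
/*
 * In this remapped RTE format (format = 1) the route entry no longer
 * carries a destination; the hardware looks up the IRTE named by the
 * 16-bit index, stored split as index2:index (bit 15 plus the low 15
 * bits).  The RTE's vector field is reused to hold the IO-APIC pin
 * number, the "virtual vector" mentioned above.
 */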

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}
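
/*
 * Remappable MSI encoding: with MSI_ADDR_IR_SHV set, the usual MSI data
 * value is replaced by the sub-handle, and the IRTE index is carried in
 * the address register, split across MSI_ADDR_IR_INDEX1 (low 15 bits)
 * and MSI_ADDR_IR_INDEX2 (bit 15).  The iommu then adds the sub-handle
 * to the index to pick the final IRTE, mirroring what
 * map_irq_to_irte_handle() does in software.
 */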

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			printk(KERN_ERR
			       "Unable to allocate %d IRTE for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * setup the mapping between the irq and the IRTE
		 * base index, the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.alloc_hpet_msi		= intel_alloc_hpet_msi,
};