/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware support
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

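/*
 * AGAW ("adjusted guest address width") encoding: value 0 corresponds to a
 * 2-level page table covering 30 bits of address; each increment adds one
 * 9-bit page-table level, up to MAX_AGAW_WIDTH.
 */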
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */

	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

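/*
 * Deferred ("lazy") IOTLB flushing: unmapped ranges are queued in these
 * tables and their IOVAs and page-table pages are only released once the
 * IOTLB has been flushed (see flush_unmaps_timeout()).
 */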
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

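/*
 * Record whether DMA remapping was already enabled (e.g. by firmware or a
 * previous kernel) before this driver takes over, based on the GSTS.TES bit.
 */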
static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

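/*
 * Per-IOMMU domain-id lookup: iommu->domains is a two-level table indexed
 * by domain id, where the high byte selects a lazily allocated page of 256
 * dmar_domain pointers and the low byte selects the entry within it.
 */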
static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

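/*
 * Find the DMAR unit (IOMMU) whose device scope covers @dev and return it,
 * reporting through @bus/@devfn the address under which that unit sees the
 * device.
 */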
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

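/*
 * Walk the page table to the entry covering @pfn. If *target_level is zero,
 * stop at the first superpage or non-present entry; otherwise descend
 * (allocating missing levels) to the requested level. *target_level is
 * updated to the level of the entry actually returned.
 */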
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

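/*
 * Check whether the device at @bus/@devfn in this domain can use a device
 * IOTLB (PCIe ATS): the IOMMU must advertise device-IOTLB support and have
 * queued invalidation available, and the device must expose the ATS
 * capability and be covered by an ATSR unit.
 */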
static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	bool found = false;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = true;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain->iommu_did[iommu->seq_id];

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
				      addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	u32 ndomains, nlongs;
	size_t size;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%d>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}

	size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
	iommu->domains = kzalloc(size, GFP_KERNEL);

	if (iommu->domains) {
		size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
	}

	if (!iommu->domains || !iommu->domains[0]) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		kfree(iommu->domains);
		iommu->domain_ids = NULL;
		iommu->domains    = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
	 */
	set_bit(0, iommu->domain_ids);

	return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct device_domain_info *info, *tmp;

	if (!iommu->domains || !iommu->domain_ids)
		return;

	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
		struct dmar_domain *domain;

		if (info->iommu != iommu)
			continue;

		if (!info->dev || !info->domain)
			continue;

		domain = info->domain;

		domain_remove_one_dev_info(domain, info->dev);

		if (!domain_type_is_vm_or_si(domain))
			domain_exit(domain);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
		int i;

		for (i = 0; i < elems; i++)
			kfree(iommu->domains[i]);
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);

	return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	num = domain->iommu_did[iommu->seq_id];
	if (num)
		return num;

	ndomains = cap_ndoms(iommu->cap);
	num	 = find_first_zero_bit(iommu->domain_ids, ndomains);

	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);
		domain->iommu_did[iommu->seq_id] = num;
	} else {
		num = -ENOSPC;
	}

	if (num < 0)
		pr_err("%s: No free domain ids\n", iommu->name);

	return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return num;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num;

	spin_lock_irqsave(&iommu->lock, flags);

	num = domain->iommu_did[iommu->seq_id];
	if (num == 0) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return;
	}

	clear_bit(num, iommu->domain_ids);
	set_iommu_domain(iommu, num, NULL);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	domain->iommu_refcnt[iommu->seq_id] += 1;
	domain->iommu_count += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	domain->iommu_refcnt[iommu->seq_id] -= 1;
	count = --domain->iommu_count;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		domain_update_iommu_cap(domain);
		domain->iommu_did[iommu->seq_id] = 0;
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		pr_err("Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				pr_err("Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

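/* Round a guest address width up to the next width expressible as the 12-bit
 * page offset plus a whole number of 9-bit page-table levels, capped at 64. */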
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist = NULL;
	int i;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_domain_iommu(i, domain)
		iommu_detach_domain(domain, g_iommus[i]);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

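/*
 * Program the context entry for (bus, devfn) on @iommu so that DMA from
 * this device is translated through @domain's page tables, or passed
 * through untranslated when hardware pass-through is in use.
 */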
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn)
{
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct device_domain_info *info = NULL;
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 1);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	pgd = domain->pgd;

	id = __iommu_attach_domain(domain, iommu);
	if (id < 0) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		pr_err("%s: No free domain ids\n", iommu->name);
		return -EFAULT;
	}

	context_clear_entry(context);
	context_set_domain_id(context, id);

	/*
	 * Skip top levels of page tables for iommu which has less agaw
	 * than default.  Unnecessary for PT mode.
	 */
	if (translation != CONTEXT_TT_PASS_THROUGH) {
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}

		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;

		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	} else {
		/*
		 * In pass through mode, AW must be programmed to
		 * indicate the largest AGAW value supported by
		 * hardware. And ASR is ignored by hardware.
		 */
		context_set_address_width(context, iommu->msagaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn);

	data.domain = domain;
	data.iommu = iommu;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

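/*
 * Map @nr_pages VT-d pages starting at @iov_pfn, taking the physical pages
 * either from the scatterlist (when @sg is set) or from the contiguous
 * range starting at @phys_pfn, and using superpages where alignment and
 * remaining length allow.
 */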
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, &domain->devices, link)
		domain_remove_one_dev_info(domain, info->dev);
}

/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

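/*
 * Bind a device (or a PCI alias identified only by bus/devfn) to @domain.
 * If the device is already attached to another domain, that domain is
 * returned instead and the caller must free the one it passed in.
 */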
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	if (iommu_attach_domain(domain, iommu) < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		pr_err("Reserving iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
			dev_name(dev), start, end);
		return 0;
	}

	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

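/*
 * Set up the static identity (si) domain, which maps physical addresses
 * 1:1.  For hardware pass-through nothing needs to be mapped; otherwise
 * every usable memory range of every online node is identity-mapped.
 */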
static int __init si_domain_init(int hw)
{
	int nid, ret = 0;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("Identity mapping domain allocated\n");

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}

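/*
 * Decide whether @dev should live in the static 1:1 domain.  RMRR-locked
 * devices never do; Azalia and graphics devices do when the corresponding
 * IDENTMAP_* bit is set; otherwise the decision depends on IDENTMAP_ALL,
 * the PCI topology and (after boot) the device's DMA mask.
 */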
static int iommu_should_identity_map(struct device *dev, int startup)
{

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will — if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}

static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev);
	if (!ret)
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}


static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev= to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}

static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
J
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}

static int copy_context_table(struct intel_iommu *iommu,
			      struct root_entry *old_re,
			      struct context_entry **tbl,
			      int bus, bool ext)
{
	struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
	phys_addr_t old_ce_phys;

	tbl_idx = ext ? bus * 2 : bus;

	for (devfn = 0; devfn < 256; devfn++) {
		/* First calculate the correct index */
		idx = (ext ? devfn * 2 : devfn) % 256;

		if (idx == 0) {
			/* First save what we may have and clean up */
			if (new_ce) {
				tbl[tbl_idx] = new_ce;
				__iommu_flush_cache(iommu, new_ce,
						    VTD_PAGE_SIZE);
				pos = 1;
			}

			if (old_ce)
				iounmap(old_ce);

			ret = 0;
			if (devfn < 0x80)
				old_ce_phys = root_entry_lctp(old_re);
			else
				old_ce_phys = root_entry_uctp(old_re);

			if (!old_ce_phys) {
				if (ext && devfn == 0) {
					/* No LCTP, try UCTP */
					devfn = 0x7f;
					continue;
				} else {
					goto out;
				}
			}

			ret = -ENOMEM;
			old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
			if (!old_ce)
				goto out;

			new_ce = alloc_pgtable_page(iommu->node);
			if (!new_ce)
				goto out_unmap;

			ret = 0;
		}

		/* Now copy the context entry */
		ce = old_ce[idx];

		if (!__context_present(&ce))
			continue;

		did = context_domain_id(&ce);
		if (did >= 0 && did < cap_ndoms(iommu->cap))
			set_bit(did, iommu->domain_ids);

		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);

		new_ce[idx] = ce;
	}

	tbl[tbl_idx + pos] = new_ce;

	__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);

out_unmap:
	iounmap(old_ce);

out:
	return ret;
}

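/*
 * In a kdump kernel, take over the translation tables left behind by the
 * previous kernel: copy its context tables and hook them into our freshly
 * allocated root entry table, marking every copied entry on the way.
 */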
static int copy_translation_tables(struct intel_iommu *iommu)
{
	struct context_entry **ctxt_tbls;
	struct root_entry *old_rt;
	phys_addr_t old_rt_phys;
	int ctxt_table_entries;
	unsigned long flags;
	u64 rtaddr_reg;
	int bus, ret;
	bool new_ext, ext;

	rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
	ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
	new_ext    = !!ecap_ecs(iommu->ecap);

	/*
	 * The RTT bit can only be changed when translation is disabled,
	 * but disabling translation means to open a window for data
	 * corruption. So bail out and don't copy anything if we would
	 * have to change the bit.
	 */
	if (new_ext != ext)
		return -EINVAL;

	old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
	if (!old_rt_phys)
		return -EINVAL;

	old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
	if (!old_rt)
		return -ENOMEM;

	/* This is too big for the stack - allocate it from slab */
	ctxt_table_entries = ext ? 512 : 256;
	ret = -ENOMEM;
	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
	if (!ctxt_tbls)
		goto out_unmap;

	for (bus = 0; bus < 256; bus++) {
		ret = copy_context_table(iommu, &old_rt[bus],
					 ctxt_tbls, bus, ext);
		if (ret) {
			pr_err("%s: Failed to copy context table for bus %d\n",
				iommu->name, bus);
			continue;
		}
	}

	spin_lock_irqsave(&iommu->lock, flags);

	/* Context tables are copied, now write them to the root_entry table */
	for (bus = 0; bus < 256; bus++) {
		int idx = ext ? bus * 2 : bus;
		u64 val;

		if (ctxt_tbls[idx]) {
			val = virt_to_phys(ctxt_tbls[idx]) | 1;
			iommu->root_entry[bus].lo = val;
		}

		if (!ext || !ctxt_tbls[idx + 1])
			continue;

		val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
		iommu->root_entry[bus].hi = val;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	kfree(ctxt_tbls);

	__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);

	ret = 0;

out_unmap:
	iounmap(old_rt);

	return ret;
}

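/*
 * One-time DMAR initialization: allocate per-IOMMU state and root entries,
 * possibly copy translation tables from a previous kernel (kdump), set up
 * identity mappings and RMRRs, then enable fault reporting and translation
 * on every active IOMMU.
 */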
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	bool copied_tables = false;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		pr_err("Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		intel_iommu_init_qi(iommu);

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		init_translation_status(iommu);

		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
			iommu_disable_translation(iommu);
			clear_translation_pre_enabled(iommu);
			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
				iommu->name);
		}

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;

		if (translation_pre_enabled(iommu)) {
			pr_info("Translation already enabled - trying to copy translation structures\n");

			ret = copy_translation_tables(iommu);
			if (ret) {
				/*
				 * We found the IOMMU with translation
				 * enabled - but failed to copy over the
				 * old root-entry table. Try to proceed
				 * by disabling translation now and
				 * allocating a clean root-entry table.
				 * This might cause DMAR faults, but
				 * probably the dump will still succeed.
				 */
				pr_err("Failed to copy translation tables from previous kernel for %s\n",
				       iommu->name);
				iommu_disable_translation(iommu);
				clear_translation_pre_enabled(iommu);
			} else {
				pr_info("Copied translation tables from previous kernel for %s\n",
					iommu->name);
				copied_tables = true;
			}
		}

		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	if (iommu_identity_mapping) {
		ret = si_domain_init(hw_pass_through);
		if (ret)
			goto free_iommu;
	}

	check_tylersburg_isoch();

	/*
	 * If we copied translations from a previous kernel in the kdump
	 * case, we can not assign the devices to domains now, as that
	 * would eliminate the old mappings. So skip this part and defer
	 * the assignment to device driver initialization time.
	 */
	if (copied_tables)
		goto domains_done;

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			pr_crit("Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
3172
	 */
J
	pr_info("Setting RMRR:\n");
3175 3176
		/* some BIOS lists non-exist devices in DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3177
					  i, dev) {
3178
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
3179
			if (ret)
J
Joerg Roedel 已提交
3180
				pr_err("Mapping reserved region failed\n");
3181
		}
F
	}


3186 3187
domains_done:

3188 3189 3190 3191 3192 3193 3194
	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
3236 3237 3238
{
	struct iova *iova = NULL;

3239 3240 3241 3242
	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3243 3244
		/*
		 * First try to allocate an io virtual address in
3245
		 * DMA_BIT_MASK(32) and if that fails then try allocating
J
Joe Perches 已提交
3246
		 * from higher range
3247
		 */
3248 3249 3250 3251 3252 3253 3254
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
J
Joerg Roedel 已提交
3255
		pr_err("Allocating %ld-page iova for %s failed",
3256
		       nrpages, dev_name(dev));
3257 3258 3259 3260 3261 3262
		return NULL;
	}

	return iova;
}

3263
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3264 3265 3266 3267
{
	struct dmar_domain *domain;
	int ret;

3268
	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3269
	if (!domain) {
J
Joerg Roedel 已提交
3270
		pr_err("Allocating domain for %s failed\n",
3271
		       dev_name(dev));
A
Al Viro 已提交
3272
		return NULL;
3273 3274 3275
	}

	/* make sure context mapping is ok */
3276
	if (unlikely(!domain_context_mapped(dev))) {
3277
		ret = domain_context_mapping(domain, dev);
3278
		if (ret) {
J
Joerg Roedel 已提交
3279
			pr_err("Domain context map for %s failed\n",
3280
			       dev_name(dev));
A
Al Viro 已提交
3281
			return NULL;
3282
		}
3283 3284
	}

3285 3286 3287
	return domain;
}

3288
static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3289 3290 3291 3292
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
3293
	info = dev->archdata.iommu;
3294 3295 3296 3297 3298 3299
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

3300
/* Check if the dev needs to go through non-identity map and unmap process.*/
3301
static int iommu_no_mapping(struct device *dev)
3302 3303 3304
{
	int found;

3305
	if (iommu_dummy(dev))
3306 3307
		return 1;

3308
	if (!iommu_identity_mapping)
3309
		return 0;
3310

3311
	found = identity_mapping(dev);
3312
	if (found) {
3313
		if (iommu_should_identity_map(dev, 0))
3314 3315 3316 3317 3318 3319
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
3320
			domain_remove_one_dev_info(si_domain, dev);
J
Joerg Roedel 已提交
3321 3322
			pr_info("32bit %s uses non-identity mapping\n",
				dev_name(dev));
3323 3324 3325 3326 3327 3328 3329
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
3330
		if (iommu_should_identity_map(dev, 0)) {
3331
			int ret;
3332
			ret = domain_add_dev_info(si_domain, dev);
3333
			if (!ret) {
J
Joerg Roedel 已提交
3334 3335
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
3336 3337 3338 3339 3340
				return 1;
			}
		}
	}

3341
	return 0;
3342 3343
}

3344
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3345
				     size_t size, int dir, u64 dma_mask)
3346 3347
{
	struct dmar_domain *domain;
F
Fenghua Yu 已提交
3348
	phys_addr_t start_paddr;
3349 3350
	struct iova *iova;
	int prot = 0;
I
Ingo Molnar 已提交
3351
	int ret;
3352
	struct intel_iommu *iommu;
3353
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3354 3355

	BUG_ON(dir == DMA_NONE);
3356

3357
	if (iommu_no_mapping(dev))
I
Ingo Molnar 已提交
3358
		return paddr;
3359

3360
	domain = get_valid_domain_for_dev(dev);
3361 3362 3363
	if (!domain)
		return 0;

3364
	iommu = domain_get_iommu(domain);
3365
	size = aligned_nrpages(paddr, size);
3366

3367
	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3368 3369 3370
	if (!iova)
		goto error;

3371 3372 3373 3374 3375
	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3376
			!cap_zlr(iommu->cap))
3377 3378 3379 3380
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
I
Ingo Molnar 已提交
3381
	 * paddr - (paddr + size) might be partial page, we should map the whole
3382
	 * page.  Note: if two part of one page are separately mapped, we
I
Ingo Molnar 已提交
3383
	 * might have two guest_addr mapping to the same host paddr, but this
3384 3385
	 * is not a big problem
	 */
3386
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3387
				 mm_to_dma_pfn(paddr_pfn), size, prot);
3388 3389 3390
	if (ret)
		goto error;

3391 3392
	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
3393 3394 3395
		iommu_flush_iotlb_psi(iommu, domain,
				      mm_to_dma_pfn(iova->pfn_lo),
				      size, 0, 1);
3396
	else
3397
		iommu_flush_write_buffer(iommu);
3398

3399 3400 3401
	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;
3402 3403

error:
3404 3405
	if (iova)
		__free_iova(&domain->iovad, iova);
J
Joerg Roedel 已提交
3406
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3407
		dev_name(dev), size, (unsigned long long)paddr, dir);
3408 3409 3410
	return 0;
}

3411 3412 3413 3414
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}

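/*
 * Flush the per-IOMMU lists of deferred unmaps: invalidate the IOTLB for
 * the queued IOVA ranges, then release the IOVAs and any freed page-table
 * pages.  Runs from the unmap timer and when the list hits the high-water
 * mark.
 */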
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
3428 3429 3430
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;
3431

3432 3433 3434
		if (!deferred_flush[i].next)
			continue;

3435 3436 3437
		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Y
Yu Zhao 已提交
3438
					 DMA_TLB_GLOBAL_FLUSH);
3439
		for (j = 0; j < deferred_flush[i].next; j++) {
Y
Yu Zhao 已提交
3440 3441
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
3442 3443 3444 3445
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
3446
				iommu_flush_iotlb_psi(iommu, domain,
3447
					iova->pfn_lo, iova_size(iova),
3448
					!deferred_flush[i].freelist[j], 0);
3449
			else {
3450
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3451 3452 3453
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
Y
Yu Zhao 已提交
3454
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3455 3456
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
3457
		}
3458
		deferred_flush[i].next = 0;
M
mark gross 已提交
3459 3460 3461 3462 3463 3464 3465
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
3466 3467 3468
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
M
mark gross 已提交
3469
	flush_unmaps();
3470
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
M
mark gross 已提交
3471 3472
}

3473
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
M
mark gross 已提交
3474 3475
{
	unsigned long flags;
3476
	int next, iommu_id;
3477
	struct intel_iommu *iommu;
M
mark gross 已提交
3478 3479

	spin_lock_irqsave(&async_umap_flush_lock, flags);
3480 3481 3482
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

3483 3484
	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;
3485

3486 3487 3488
	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
3489
	deferred_flush[iommu_id].freelist[next] = freelist;
3490
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

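/*
 * Tear down the DMA mapping at @dev_addr: look up the IOVA, unmap the
 * page-table range, and either flush the IOTLB immediately (strict mode)
 * or queue the range for a batched, deferred flush.
 */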
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
3503
	unsigned long start_pfn, last_pfn;
3504
	struct iova *iova;
3505
	struct intel_iommu *iommu;
3506
	struct page *freelist;
3507

3508
	if (iommu_no_mapping(dev))
3509
		return;
3510

3511
	domain = find_domain(dev);
3512 3513
	BUG_ON(!domain);

3514 3515
	iommu = domain_get_iommu(domain);

3516
	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3517 3518
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
3519 3520
		return;

3521 3522
	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3523

3524
	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3525
		 dev_name(dev), start_pfn, last_pfn);
3526

3527
	freelist = domain_unmap(domain, start_pfn, last_pfn);
3528

M
mark gross 已提交
3529
	if (intel_iommu_strict) {
3530
		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3531
				      last_pfn - start_pfn + 1, !freelist, 0);
M
mark gross 已提交
3532 3533
		/* free iova */
		__free_iova(&domain->iovad, iova);
3534
		dma_free_pagelist(freelist);
M
mark gross 已提交
3535
	} else {
3536
		add_unmap(domain, iova, freelist);
M
mark gross 已提交
3537 3538 3539 3540 3541
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
3542 3543
}

3544 3545 3546 3547 3548 3549 3550
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}

3551
static void *intel_alloc_coherent(struct device *dev, size_t size,
3552 3553
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
3554
{
A
Akinobu Mita 已提交
3555
	struct page *page = NULL;
3556 3557
	int order;

F
Fenghua Yu 已提交
3558
	size = PAGE_ALIGN(size);
3559
	order = get_order(size);
3560

3561
	if (!iommu_no_mapping(dev))
3562
		flags &= ~(GFP_DMA | GFP_DMA32);
3563 3564
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3565 3566 3567 3568
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}
3569

A
Akinobu Mita 已提交
3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583
	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
3584
		return NULL;
A
Akinobu Mita 已提交
3585
	memset(page_address(page), 0, size);
3586

A
Akinobu Mita 已提交
3587
	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3588
					 DMA_BIDIRECTIONAL,
3589
					 dev->coherent_dma_mask);
3590
	if (*dma_handle)
A
Akinobu Mita 已提交
3591 3592 3593 3594
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

3595 3596 3597
	return NULL;
}

3598
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3599
				dma_addr_t dma_handle, struct dma_attrs *attrs)
3600 3601
{
	int order;
A
Akinobu Mita 已提交
3602
	struct page *page = virt_to_page(vaddr);
3603

F
Fenghua Yu 已提交
3604
	size = PAGE_ALIGN(size);
3605 3606
	order = get_order(size);

3607
	intel_unmap(dev, dma_handle);
A
Akinobu Mita 已提交
3608 3609
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
3610 3611
}

3612
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3613 3614
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
3615
{
3616
	intel_unmap(dev, sglist[0].dma_address);
3617 3618 3619
}

static int intel_nontranslate_map_sg(struct device *hddev,
F
FUJITA Tomonori 已提交
3620
	struct scatterlist *sglist, int nelems, int dir)
3621 3622
{
	int i;
F
FUJITA Tomonori 已提交
3623
	struct scatterlist *sg;
3624

F
FUJITA Tomonori 已提交
3625
	for_each_sg(sglist, sg, nelems, i) {
F
FUJITA Tomonori 已提交
3626
		BUG_ON(!sg_page(sg));
3627
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
F
FUJITA Tomonori 已提交
3628
		sg->dma_length = sg->length;
3629 3630 3631 3632
	}
	return nelems;
}

3633
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3634
			enum dma_data_direction dir, struct dma_attrs *attrs)
3635 3636 3637
{
	int i;
	struct dmar_domain *domain;
3638 3639 3640 3641
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
F
FUJITA Tomonori 已提交
3642
	struct scatterlist *sg;
3643
	unsigned long start_vpfn;
3644
	struct intel_iommu *iommu;
3645 3646

	BUG_ON(dir == DMA_NONE);
3647 3648
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3649

3650
	domain = get_valid_domain_for_dev(dev);
3651 3652 3653
	if (!domain)
		return 0;

3654 3655
	iommu = domain_get_iommu(domain);

3656
	for_each_sg(sglist, sg, nelems, i)
3657
		size += aligned_nrpages(sg->offset, sg->length);
3658

3659 3660
	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
3661
	if (!iova) {
F
FUJITA Tomonori 已提交
3662
		sglist->dma_length = 0;
3663 3664 3665 3666 3667 3668 3669 3670
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3671
			!cap_zlr(iommu->cap))
3672 3673 3674 3675
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

3676
	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3677

3678
	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3679 3680 3681 3682 3683
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
3684 3685
	}

3686 3687
	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
3688
		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3689
	else
3690
		iommu_flush_write_buffer(iommu);
3691

3692 3693 3694
	return nelems;
}

3695 3696 3697 3698 3699
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

3700
struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		pr_err("Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		pr_err("Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	iommu_iova_cache_destroy();

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	iommu_iova_cache_destroy();
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

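/*
 * Mark DMAR units that translate nothing useful as ignored: units whose
 * device scope is empty, and graphics-only units when dmar_map_gfx is
 * disabled.
 */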
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}
	
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

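/*
 * Save the fault-event registers of every active IOMMU across a system
 * suspend; iommu_resume() restores them after re-running init_iommu_hw().
 */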
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */


int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

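/*
 * Bring a hot-added DMAR unit online, largely mirroring what init_dmars()
 * does for the units present at boot.
 */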
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

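/*
 * Keep the RMRR and ATSR device-scope lists in sync when PCI devices are
 * hot-added or removed.
 */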
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

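/*
 * Memory hotplug notifier: identity-map newly onlined ranges into
 * si_domain and tear down (and flush) the mappings when memory goes away.
 */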
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
					       iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

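/*
 * Sysfs attributes exported for each DMAR unit via the iommu class device
 * created in intel_iommu_init().
 */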

static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static ssize_t intel_iommu_show_ndoms(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
}
static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);

static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
						  cap_ndoms(iommu->cap)));
}
static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	&dev_attr_domains_supported.attr,
	&dev_attr_domains_used.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

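/*
 * Main initialization entry point: parse the DMAR tables, program every
 * IOMMU via init_dmars(), then install the DMA-API ops and register the
 * IOMMU-API ops, notifiers and sysfs devices.
 */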
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	info = dev->archdata.iommu;

	if (WARN_ON(!info))
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	unlink_domain_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	iommu_disable_dev_iotlb(info);
	iommu_detach_dev(iommu, info->bus, info->devfn);
	iommu_detach_dependent_devices(iommu, dev);
	free_devinfo_mem(info);
	domain_detach_iommu(domain, iommu);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!domain->iommu_refcnt[iommu->seq_id])
		iommu_detach_domain(domain, iommu);
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			     list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

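/*
 * IOMMU-API callbacks, registered for the PCI bus with bus_set_iommu()
 * from intel_iommu_init().
 */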
static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}