/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
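/*
 * Put differently: the core interprets this as a bitmap with one bit per
 * power-of-two page size, so ~0xFFFUL advertises every size from 4KiB
 * (bit 12) upwards, e.g. 2MiB (bit 21) and 1GiB (bit 30).
 */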

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
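/*
 * Worked example: the default 48-bit address width gives
 * width_to_agaw(48) == 2, which agaw_to_level() turns into a 4-level
 * page table and agaw_to_width() maps back to 48 bits.
 */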

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
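/*
 * With 4KiB CPU pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12) these
 * conversions are identity operations; they only shift when the CPU
 * page size is larger than the 4KiB VT-d page size.
 */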

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
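/* i.e. 256 root entries per 4KiB root table, one for each PCI bus number */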

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
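/*
 * For example, a present read/write leaf entry for host page frame 'pfn'
 * holds ((u64)pfn << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE,
 * the same pattern built in pfn_to_dma_pte() below.
 */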

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 * 	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

#define HIGH_WATER_MARK 250
struct deferred_flush_table {
	int next;
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	spinlock_t lock;
	int timer_on;
	struct timer_list timer;
	long size;
	struct deferred_flush_table *tables;
};

DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) &&			\
			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
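/*
 * e.g. a domain with agaw 2 (48-bit address width) supports DMA pfns
 * below 1UL << (48 - VTD_PAGE_SHIFT) == 1UL << 36.
 */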

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
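/*
 * Note: in extended (ECS) mode a root entry carries two context-table
 * pointers (lo for devfn 0-127, hi for 128-255) and each extended
 * context entry is twice the size of a legacy one, which is why devfn
 * is rebased and doubled above.
 */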

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* clear last level (leaf) ptes and free page table pages. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	struct device_domain_info *info;

	assert_spin_locked(&device_domain_lock);

	if (!iommu->qi)
		return NULL;

	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			if (info->ats_supported && info->dev)
				return info;
			break;
		}

	return NULL;
}

static void domain_update_iotlb(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	bool has_iotlb_device = false;

	assert_spin_locked(&device_domain_lock);

	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;

		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (pdev->ats_enabled) {
			has_iotlb_device = true;
			break;
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	struct pci_dev *pdev;

	assert_spin_locked(&device_domain_lock);

	if (!info || !dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

#ifdef CONFIG_INTEL_IOMMU_SVM
	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
		info->pasid_enabled = 1;

	if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
		info->pri_enabled = 1;
#endif
	if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
		info->ats_enabled = 1;
1516
		domain_update_iotlb(info->domain);
1517 1518
		info->ats_qdep = pci_ats_queue_depth(pdev);
	}
Y

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
1523 1524
	struct pci_dev *pdev;

1525 1526
	assert_spin_locked(&device_domain_lock);

1527
	if (!dev_is_pci(info->dev))
Y

1530 1531 1532 1533 1534
	pdev = to_pci_dev(info->dev);

	if (info->ats_enabled) {
		pci_disable_ats(pdev);
		info->ats_enabled = 0;
1535
		domain_update_iotlb(info->domain);
1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546
	}
#ifdef CONFIG_INTEL_IOMMU_SVM
	if (info->pri_enabled) {
		pci_disable_pri(pdev);
		info->pri_enabled = 0;
	}
	if (info->pasid_enabled) {
		pci_disable_pasid(pdev);
		info->pasid_enabled = 0;
	}
#endif
Y

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

1556 1557 1558
	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->ats_enabled)
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = info->ats_qdep;
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain->iommu_did[iommu->seq_id];

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
				      addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1650
		      readl, (!(sts & DMA_GSTS_TES)), sts);
1651

1652
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1653 1654
}

1655

1656 1657
static int iommu_init_domains(struct intel_iommu *iommu)
{
1658 1659
	u32 ndomains, nlongs;
	size_t size;
1660 1661

	ndomains = cap_ndoms(iommu->cap);
1662
	pr_debug("%s: Number of Domains supported <%d>\n",
J
1664 1665
	nlongs = BITS_TO_LONGS(ndomains);

1666 1667
	spin_lock_init(&iommu->lock);

1668 1669
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
J
		       iommu->name);
1672 1673
		return -ENOMEM;
	}
1674

1675
	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1676 1677 1678 1679 1680 1681 1682 1683
	iommu->domains = kzalloc(size, GFP_KERNEL);

	if (iommu->domains) {
		size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
	}

	if (!iommu->domains || !iommu->domains[0]) {
J
		       iommu->name);
1686
		kfree(iommu->domain_ids);
1687
		kfree(iommu->domains);
1688
		iommu->domain_ids = NULL;
1689
		iommu->domains    = NULL;
1690 1691 1692
		return -ENOMEM;
	}

1693 1694


1695
	/*
1696 1697 1698 1699
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
1700
	 */
1701 1702
	set_bit(0, iommu->domain_ids);

1703 1704 1705
	return 0;
}

1706
static void disable_dmar_iommu(struct intel_iommu *iommu)
1707
{
1708
	struct device_domain_info *info, *tmp;
1709
	unsigned long flags;
1710

1711 1712
	if (!iommu->domains || !iommu->domain_ids)
		return;
1713

1714
	spin_lock_irqsave(&device_domain_lock, flags);
1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725
	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
		struct dmar_domain *domain;

		if (info->iommu != iommu)
			continue;

		if (!info->dev || !info->domain)
			continue;

		domain = info->domain;

1726
		dmar_remove_one_dev_info(domain, info->dev);
1727 1728 1729

		if (!domain_type_is_vm_or_si(domain))
			domain_exit(domain);
1730
	}
1731
	spin_unlock_irqrestore(&device_domain_lock, flags);
1732 1733 1734

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
1735
}
1736

1737 1738 1739
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
1740
		int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1741 1742 1743 1744
		int i;

		for (i = 0; i < elems; i++)
			kfree(iommu->domains[i]);
1745 1746 1747 1748 1749
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}
1750

W

1753 1754
	/* free context mapping */
	free_context_table(iommu);
1755 1756

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_enabled(iommu)) {
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
		intel_svm_free_pasid_tables(iommu);
	}
#endif
}

static struct dmar_domain *alloc_domain(int flags)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);

	return domain;
}

/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long ndomains;
	int num;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] += 1;
	domain->iommu_count += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		ndomains = cap_ndoms(iommu->cap);
		num      = find_first_zero_bit(iommu->domain_ids, ndomains);

		if (num >= ndomains) {
			pr_err("%s: No free domain ids\n", iommu->name);
			domain->iommu_refcnt[iommu->seq_id] -= 1;
			domain->iommu_count -= 1;
			return -ENOSPC;
		}

		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);

		domain->iommu_did[iommu->seq_id] = num;
		domain->nid			 = iommu->node;

		domain_update_iommu_cap(domain);
	}

	return 0;
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num, count = INT_MAX;

	assert_spin_locked(&device_domain_lock);
	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] -= 1;
	count = --domain->iommu_count;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		num = domain->iommu_did[iommu->seq_id];
		clear_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, NULL);

		domain_update_iommu_cap(domain);
		domain->iommu_did[iommu->seq_id] = 0;
	}

	return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
1857
	if (!iova) {
J
Joerg Roedel 已提交
1858
		pr_err("Reserve IOAPIC range failed\n");
1859 1860
		return -ENODEV;
	}
1861 1862 1863 1864 1865 1866 1867 1868 1869

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
1870 1871 1872
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
1873
			if (!iova) {
J
Joerg Roedel 已提交
1874
				pr_err("Reserve iova failed\n");
1875 1876
				return -ENODEV;
			}
1877 1878
		}
	}
1879
	return 0;
1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

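/*
 * Round a guest address width up to the next width a VT-d page-table
 * hierarchy can cover: 12 offset bits plus a whole number of 9-bit
 * levels, capped at 64.  For example a 40-bit guest width becomes 48,
 * while 48 stays 48.
 */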
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

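/*
 * Initialize a newly allocated domain for use with @iommu: set up its
 * IOVA allocator, pick an AGAW supported by the hardware for the
 * requested guest width, inherit coherency/snooping/superpage
 * capabilities and allocate the top-level page directory.
 */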
static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
		       int guest_width)
{
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict) {
		int cpu;

		for_each_possible_cpu(cpu)
			flush_unmaps_timeout(cpu);
	}

	/* Remove associated devices and clear attached or cached domains */
	rcu_read_lock();
	domain_remove_dev_info(domain);
	rcu_read_unlock();

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

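/*
 * Program the context entry for (@bus, @devfn) on @iommu so that DMA
 * from the device is translated through @domain's page tables, or
 * passed through untranslated when hardware pass-through is used for
 * the static identity domain.
 */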
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn)
{
	u16 did = domain->iommu_did[iommu->seq_id];
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct device_domain_info *info = NULL;
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int ret, agaw;

	WARN_ON(did == 0);

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -ENOMEM;
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context)
		goto out_unlock;

	ret = 0;
	if (context_present(context))
		goto out_unlock;

	pgd = domain->pgd;

	context_clear_entry(context);
	context_set_domain_id(context, did);

	/*
	 * Skip top levels of page tables for iommu which has less agaw
	 * than default.  Unnecessary for PT mode.
	 */
	if (translation != CONTEXT_TT_PASS_THROUGH) {
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			ret = -ENOMEM;
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd))
				goto out_unlock;
		}

		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		if (info && info->ats_supported)
			translation = CONTEXT_TT_DEV_IOTLB;
		else
			translation = CONTEXT_TT_MULTI_LEVEL;

		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	} else {
		/*
		 * In pass through mode, AW must be programmed to
		 * indicate the largest AGAW value supported by
		 * hardware. And ASR is ignored by hardware.
		 */
		context_set_address_width(context, iommu->msagaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);

	ret = 0;

out_unlock:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn);

	data.domain = domain;
	data.iommu = iommu;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
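
/*
 * Example (4KiB pages): a 0x2000-byte buffer starting at page offset
 * 0x234 touches three pages, so aligned_nrpages(0x1234, 0x2000) == 3.
 */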

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

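/*
 * Install PTEs for @nr_pages VT-d pages starting at @iov_pfn, taking
 * the physical pages either from @sg or from the contiguous range at
 * @phys_pfn.  Superpages are used whenever IOVA, physical address and
 * remaining size are suitably aligned and the hardware supports them.
 */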
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				unsigned long nr_superpages, end_pfn;

				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);

				nr_superpages = sg_res / lvl_pages;
				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage(s).
				 */
				dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link)
		__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the iommu info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

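/*
 * Allocate a device_domain_info for (@bus, @devfn), attach @domain to
 * @iommu and program the device's context entry.  If the device (or a
 * DMA alias) already has a domain, that domain is returned instead and
 * the caller must dispose of its tentative @domain.
 */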
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
						    int bus, int devfn,
						    struct device *dev,
						    struct dmar_domain *domain)
{
2365
	struct dmar_domain *found = NULL;
2366 2367
	struct device_domain_info *info;
	unsigned long flags;
2368
	int ret;
2369 2370 2371

	info = alloc_devinfo_mem();
	if (!info)
2372
		return NULL;
2373 2374 2375

	info->bus = bus;
	info->devfn = devfn;
2376 2377 2378
	info->ats_supported = info->pasid_supported = info->pri_supported = 0;
	info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
	info->ats_qdep = 0;
2379 2380
	info->dev = dev;
	info->domain = domain;
2381
	info->iommu = iommu;
2382

2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403
	if (dev && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(info->dev);

		if (ecap_dev_iotlb_support(iommu->ecap) &&
		    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
		    dmar_find_matched_atsr_unit(pdev))
			info->ats_supported = 1;

		if (ecs_enabled(iommu)) {
			if (pasid_enabled(iommu)) {
				int features = pci_pasid_features(pdev);
				if (features >= 0)
					info->pasid_supported = features | 1;
			}

			if (info->ats_supported && ecap_prs(iommu->ecap) &&
			    pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
				info->pri_supported = 1;
		}
	}

2404 2405
	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
2406
		found = find_domain(dev);
2407 2408

	if (!found) {
2409
		struct device_domain_info *info2;
2410
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2411 2412 2413 2414
		if (info2) {
			found      = info2->domain;
			info2->dev = dev;
		}
2415
	}
2416

2417 2418 2419
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
2420 2421
		/* Caller must free the original domain */
		return found;
2422 2423
	}

2424 2425 2426 2427 2428
	spin_lock(&iommu->lock);
	ret = domain_attach_iommu(domain, iommu);
	spin_unlock(&iommu->lock);

	if (ret) {
2429
		spin_unlock_irqrestore(&device_domain_lock, flags);
2430
		free_devinfo_mem(info);
2431 2432 2433
		return NULL;
	}

2434 2435 2436 2437 2438 2439
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

2440 2441
	if (dev && domain_context_mapping(domain, dev)) {
		pr_err("Domain context map for %s failed\n", dev_name(dev));
2442
		dmar_remove_one_dev_info(domain, dev);
2443 2444 2445
		return NULL;
	}

2446
	return domain;
2447 2448
}

2449 2450 2451 2452 2453 2454
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

2455
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2456
{
2457
	struct device_domain_info *info = NULL;
2458
	struct dmar_domain *domain = NULL;
2459
	struct intel_iommu *iommu;
2460
	u16 req_id, dma_alias;
2461
	unsigned long flags;
2462
	u8 bus, devfn;
2463

2464 2465 2466 2467
	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

2468 2469
	req_id = ((u16)bus << 8) | devfn;

2470 2471
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
2472

2473 2474 2475 2476 2477 2478 2479 2480 2481
		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
2482
		}
2483
		spin_unlock_irqrestore(&device_domain_lock, flags);
2484

2485
		/* DMA alias already has a domain, use it */
2486
		if (info)
2487
			goto out;
2488
	}
2489

2490
	/* Allocate and initialize new domain for the device */
2491
	domain = alloc_domain(0);
2492
	if (!domain)
2493
		return NULL;
2494
	if (domain_init(domain, iommu, gaw)) {
2495 2496
		domain_exit(domain);
		return NULL;
2497
	}
2498

2499
out:
2500

2501 2502
	return domain;
}
2503

2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530
static struct dmar_domain *set_domain_for_dev(struct device *dev,
					      struct dmar_domain *domain)
{
	struct intel_iommu *iommu;
	struct dmar_domain *tmp;
	u16 req_id, dma_alias;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	req_id = ((u16)bus << 8) | devfn;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		/* register PCI DMA alias device */
		if (req_id != dma_alias) {
			tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					dma_alias & 0xff, NULL, domain);

			if (!tmp || tmp != domain)
				return tmp;
		}
2531 2532
	}

2533
	tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2534 2535 2536 2537 2538
	if (!tmp || tmp != domain)
		return tmp;

	return domain;
}
2539

2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;

	domain = find_domain(dev);
	if (domain)
		goto out;

	domain = find_or_alloc_domain(dev, gaw);
	if (!domain)
		goto out;

	tmp = set_domain_for_dev(dev, domain);
	if (!tmp || domain != tmp) {
2554 2555 2556
		domain_exit(domain);
		domain = tmp;
	}
2557

2558 2559
out:

2560
	return domain;
2561 2562
}

2563 2564 2565
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
2566
{
2567 2568 2569 2570 2571
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
J
Joerg Roedel 已提交
2572
		pr_err("Reserving iova failed\n");
2573
		return -ENOMEM;
2574 2575
	}

J
Joerg Roedel 已提交
2576
	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2577 2578 2579 2580
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
2581
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2582

2583 2584
	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
2585
				  DMA_PTE_READ|DMA_PTE_WRITE);
2586 2587
}

2588 2589 2590 2591
static int domain_prepare_identity_map(struct device *dev,
				       struct dmar_domain *domain,
				       unsigned long long start,
				       unsigned long long end)
2592
{
2593 2594 2595 2596 2597
	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
J
Joerg Roedel 已提交
2598 2599
		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
			dev_name(dev), start, end);
2600 2601 2602
		return 0;
	}

J
Joerg Roedel 已提交
2603 2604 2605
	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		dev_name(dev), start, end);

2606 2607 2608 2609 2610 2611
	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
2612
		return -EIO;
2613 2614
	}

2615 2616 2617 2618 2619 2620 2621
	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
2622
		return -EIO;
2623
	}
2624

2625 2626
	return iommu_domain_identity_map(domain, start, end);
}
2627

2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = domain_prepare_identity_map(dev, domain, start, end);
	if (ret)
		domain_exit(domain);
2642

2643 2644 2645 2646
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2647
					 struct device *dev)
2648
{
2649
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2650
		return 0;
2651 2652
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
2653 2654
}

2655
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2656 2657 2658 2659 2660 2661 2662 2663 2664
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

J
Joerg Roedel 已提交
2665
	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2666
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2667 2668

	if (ret)
J
Joerg Roedel 已提交
2669
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2670

2671
	pci_dev_put(pdev);
2672 2673 2674 2675 2676 2677
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
2678
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2679

2680
static int md_domain_init(struct dmar_domain *domain, int guest_width);
2681

2682
static int __init si_domain_init(int hw)
2683
{
2684
	int nid, ret = 0;
2685

2686
	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2687 2688 2689 2690 2691 2692 2693 2694
	if (!si_domain)
		return -EFAULT;

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

2695
	pr_debug("Identity mapping domain allocated\n");
2696

2697 2698 2699
	if (hw)
		return 0;

2700
	for_each_online_node(nid) {
2701 2702 2703 2704 2705 2706 2707 2708 2709
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
2710 2711
	}

2712 2713 2714
	return 0;
}

2715
static int identity_mapping(struct device *dev)
2716 2717 2718 2719 2720 2721
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

2722
	info = dev->archdata.iommu;
2723 2724
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);
2725 2726 2727 2728

	return 0;
}

2729
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2730
{
2731
	struct dmar_domain *ndomain;
2732
	struct intel_iommu *iommu;
2733
	u8 bus, devfn;
2734

2735
	iommu = device_to_iommu(dev, &bus, &devfn);
2736 2737 2738
	if (!iommu)
		return -ENODEV;

2739
	ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2740 2741
	if (ndomain != domain)
		return -EBUSY;
2742 2743 2744 2745

	return 0;
}

2746
static bool device_has_rmrr(struct device *dev)
2747 2748
{
	struct dmar_rmrr_unit *rmrr;
2749
	struct device *tmp;
2750 2751
	int i;

2752
	rcu_read_lock();
2753
	for_each_rmrr_units(rmrr) {
2754 2755 2756 2757 2758 2759
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
2760
			if (tmp == dev) {
2761
				rcu_read_unlock();
2762
				return true;
2763
			}
2764
	}
2765
	rcu_read_unlock();
2766 2767 2768
	return false;
}

2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785
/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
2786 2787 2788 2789
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
2790 2791 2792 2793 2794 2795 2796 2797 2798
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

2799
		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2800 2801 2802 2803 2804 2805
			return false;
	}

	return true;
}

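/*
 * Decide whether @dev should be placed in the static identity (1:1)
 * domain.  RMRR-locked devices never are; Azalia and graphics devices
 * follow IDENTMAP_AZALIA/IDENTMAP_GFX; everything else depends on
 * IDENTMAP_ALL, PCIe topology and (at run time) the device's DMA mask.
 */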
static int iommu_should_identity_map(struct device *dev, int startup)
2807
{
2808

2809 2810
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
2811

2812
		if (device_is_rmrr_locked(dev))
2813
			return 0;
2814

2815 2816
		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;
2817

2818 2819
		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;
2820

2821
		if (!(iommu_identity_mapping & IDENTMAP_ALL))
2822
			return 0;
2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2847
			return 0;
2848 2849 2850 2851
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}
2852

2853
	/*
2854
	 * At boot time, we don't yet know if devices will be 64-bit capable.
2855
	 * Assume that they will — if they turn out not to be, then we can
2856 2857
	 * take them out of the 1:1 domain later.
	 */
2858 2859 2860 2861 2862
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
2863
		u64 dma_mask = *dev->dma_mask;
2864

2865 2866 2867
		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;
2868

2869
		return dma_mask >= dma_get_required_mask(dev);
2870
	}
2871 2872 2873 2874

	return 1;
}

2875 2876 2877 2878 2879 2880 2881
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

2882
	ret = domain_add_dev_info(si_domain, dev);
2883
	if (!ret)
J
Joerg Roedel 已提交
2884 2885
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
2886 2887 2888 2889 2890 2891 2892 2893
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}


2894
static int __init iommu_prepare_static_identity_mapping(int hw)
2895 2896
{
	struct pci_dev *pdev = NULL;
2897 2898 2899 2900 2901
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;
2902 2903

	for_each_pci_dev(pdev) {
2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;
2916

2917 2918 2919 2920 2921 2922
			adev= to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
2923
			}
2924 2925 2926
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
2927
		}
2928 2929 2930 2931

	return 0;
}

2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
J
Joerg Roedel 已提交
2958
		pr_info("%s: Using Register based invalidation\n",
2959 2960 2961 2962
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
J
Joerg Roedel 已提交
2963
		pr_info("%s: Using Queued invalidation\n", iommu->name);
2964 2965 2966
	}
}

2967
static int copy_context_table(struct intel_iommu *iommu,
2968
			      struct root_entry *old_re,
2969 2970 2971
			      struct context_entry **tbl,
			      int bus, bool ext)
{
2972
	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2973
	struct context_entry *new_ce = NULL, ce;
2974
	struct context_entry *old_ce = NULL;
2975
	struct root_entry re;
2976 2977 2978
	phys_addr_t old_ce_phys;

	tbl_idx = ext ? bus * 2 : bus;
2979
	memcpy(&re, old_re, sizeof(re));
2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998

	for (devfn = 0; devfn < 256; devfn++) {
		/* First calculate the correct index */
		idx = (ext ? devfn * 2 : devfn) % 256;

		if (idx == 0) {
			/* First save what we may have and clean up */
			if (new_ce) {
				tbl[tbl_idx] = new_ce;
				__iommu_flush_cache(iommu, new_ce,
						    VTD_PAGE_SIZE);
				pos = 1;
			}

			if (old_ce)
				iounmap(old_ce);

			ret = 0;
			if (devfn < 0x80)
2999
				old_ce_phys = root_entry_lctp(&re);
3000
			else
3001
				old_ce_phys = root_entry_uctp(&re);
3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013

			if (!old_ce_phys) {
				if (ext && devfn == 0) {
					/* No LCTP, try UCTP */
					devfn = 0x7f;
					continue;
				} else {
					goto out;
				}
			}

			ret = -ENOMEM;
3014 3015
			old_ce = memremap(old_ce_phys, PAGE_SIZE,
					MEMREMAP_WB);
3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026
			if (!old_ce)
				goto out;

			new_ce = alloc_pgtable_page(iommu->node);
			if (!new_ce)
				goto out_unmap;

			ret = 0;
		}

		/* Now copy the context entry */
3027
		memcpy(&ce, old_ce + idx, sizeof(ce));
3028

3029
		if (!__context_present(&ce))
3030 3031
			continue;

3032 3033 3034 3035
		did = context_domain_id(&ce);
		if (did >= 0 && did < cap_ndoms(iommu->cap))
			set_bit(did, iommu->domain_ids);

3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054
		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);

3055 3056 3057 3058 3059 3060 3061 3062
		new_ce[idx] = ce;
	}

	tbl[tbl_idx + pos] = new_ce;

	__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);

out_unmap:
3063
	memunmap(old_ce);
3064 3065 3066 3067 3068 3069 3070 3071

out:
	return ret;
}

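/*
 * In a kdump kernel, take over the root/context tables that the
 * crashed kernel left behind instead of allocating fresh ones, so that
 * DMA set up by the old kernel keeps working while the dump is taken.
 */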
static int copy_translation_tables(struct intel_iommu *iommu)
{
	struct context_entry **ctxt_tbls;
3072
	struct root_entry *old_rt;
3073 3074 3075 3076 3077
	phys_addr_t old_rt_phys;
	int ctxt_table_entries;
	unsigned long flags;
	u64 rtaddr_reg;
	int bus, ret;
3078
	bool new_ext, ext;
3079 3080 3081

	rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
	ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3082 3083 3084 3085 3086 3087 3088 3089 3090 3091
	new_ext    = !!ecap_ecs(iommu->ecap);

	/*
	 * The RTT bit can only be changed when translation is disabled,
	 * but disabling translation means to open a window for data
	 * corruption. So bail out and don't copy anything if we would
	 * have to change the bit.
	 */
	if (new_ext != ext)
		return -EINVAL;
3092 3093 3094 3095 3096

	old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
	if (!old_rt_phys)
		return -EINVAL;

3097
	old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145
	if (!old_rt)
		return -ENOMEM;

	/* This is too big for the stack - allocate it from slab */
	ctxt_table_entries = ext ? 512 : 256;
	ret = -ENOMEM;
	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
	if (!ctxt_tbls)
		goto out_unmap;

	for (bus = 0; bus < 256; bus++) {
		ret = copy_context_table(iommu, &old_rt[bus],
					 ctxt_tbls, bus, ext);
		if (ret) {
			pr_err("%s: Failed to copy context table for bus %d\n",
				iommu->name, bus);
			continue;
		}
	}

	spin_lock_irqsave(&iommu->lock, flags);

	/* Context tables are copied, now write them to the root_entry table */
	for (bus = 0; bus < 256; bus++) {
		int idx = ext ? bus * 2 : bus;
		u64 val;

		if (ctxt_tbls[idx]) {
			val = virt_to_phys(ctxt_tbls[idx]) | 1;
			iommu->root_entry[bus].lo = val;
		}

		if (!ext || !ctxt_tbls[idx + 1])
			continue;

		val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
		iommu->root_entry[bus].hi = val;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	kfree(ctxt_tbls);

	__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);

	ret = 0;

out_unmap:
3146
	memunmap(old_rt);
3147 3148 3149 3150

	return ret;
}

3151
static int __init init_dmars(void)
3152 3153 3154
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
3155
	bool copied_tables = false;
3156
	struct device *dev;
3157
	struct intel_iommu *iommu;
3158
	int i, ret, cpu;
3159

3160 3161 3162 3163 3164 3165 3166
	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
M
mark gross 已提交
3167 3168 3169 3170 3171
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
3172
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3173 3174 3175
			g_num_of_iommus++;
			continue;
		}
J
Joerg Roedel 已提交
3176
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
M
mark gross 已提交
3177 3178
	}

3179 3180 3181 3182
	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

W
Weidong Han 已提交
3183 3184 3185
	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
J
Joerg Roedel 已提交
3186
		pr_err("Allocating global iommu array failed\n");
W
Weidong Han 已提交
3187 3188 3189 3190
		ret = -ENOMEM;
		goto error;
	}

3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204
	for_each_possible_cpu(cpu) {
		struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
							      cpu);

		dfd->tables = kzalloc(g_num_of_iommus *
				      sizeof(struct deferred_flush_table),
				      GFP_KERNEL);
		if (!dfd->tables) {
			ret = -ENOMEM;
			goto free_g_iommus;
		}

		spin_lock_init(&dfd->lock);
		setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
M
mark gross 已提交
3205 3206
	}

3207
	for_each_active_iommu(iommu, drhd) {
W
Weidong Han 已提交
3208
		g_iommus[iommu->seq_id] = iommu;
3209

3210 3211
		intel_iommu_init_qi(iommu);

3212 3213
		ret = iommu_init_domains(iommu);
		if (ret)
3214
			goto free_iommu;
3215

3216 3217
		init_translation_status(iommu);

3218 3219 3220 3221 3222 3223
		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
			iommu_disable_translation(iommu);
			clear_translation_pre_enabled(iommu);
			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
				iommu->name);
		}
3224

3225 3226 3227
		/*
		 * TBD:
		 * we could share the same root & context tables
L
Lucas De Marchi 已提交
3228
		 * among all IOMMU's. Need to Split it later.
3229 3230
		 */
		ret = iommu_alloc_root_entry(iommu);
3231
		if (ret)
3232
			goto free_iommu;
3233

3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254
		if (translation_pre_enabled(iommu)) {
			pr_info("Translation already enabled - trying to copy translation structures\n");

			ret = copy_translation_tables(iommu);
			if (ret) {
				/*
				 * We found the IOMMU with translation
				 * enabled - but failed to copy over the
				 * old root-entry table. Try to proceed
				 * by disabling translation now and
				 * allocating a clean root-entry table.
				 * This might cause DMAR faults, but
				 * probably the dump will still succeed.
				 */
				pr_err("Failed to copy translation tables from previous kernel for %s\n",
				       iommu->name);
				iommu_disable_translation(iommu);
				clear_translation_pre_enabled(iommu);
			} else {
				pr_info("Copied translation tables from previous kernel for %s\n",
					iommu->name);
3255
				copied_tables = true;
3256 3257 3258
			}
		}

F
Fenghua Yu 已提交
3259
		if (!ecap_pass_through(iommu->ecap))
3260
			hw_pass_through = 0;
3261 3262 3263 3264
#ifdef CONFIG_INTEL_IOMMU_SVM
		if (pasid_enabled(iommu))
			intel_svm_alloc_pasid_tables(iommu);
#endif
3265 3266
	}

3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278
	/*
	 * Now that qi is enabled on all iommus, set the root entry and flush
	 * caches. This is required on some Intel X58 chipsets, otherwise the
	 * flush_context function will loop forever and the boot hangs.
	 */
	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	}

3279
	if (iommu_pass_through)
3280 3281
		iommu_identity_mapping |= IDENTMAP_ALL;

3282
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3283
	iommu_identity_mapping |= IDENTMAP_GFX;
3284
#endif
3285

3286 3287 3288 3289 3290 3291
	if (iommu_identity_mapping) {
		ret = si_domain_init(hw_pass_through);
		if (ret)
			goto free_iommu;
	}

3292 3293
	check_tylersburg_isoch();

3294 3295 3296 3297 3298 3299 3300 3301 3302
	/*
	 * If we copied translations from a previous kernel in the kdump
	 * case, we can not assign the devices to domains now, as that
	 * would eliminate the old mappings. So skip this part and defer
	 * the assignment to device driver initialization time.
	 */
	if (copied_tables)
		goto domains_done;

3303
	/*
3304 3305 3306
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
3307
	 */
3308 3309
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
F
Fenghua Yu 已提交
3310
		if (ret) {
J
Joerg Roedel 已提交
3311
			pr_crit("Failed to setup IOMMU pass-through\n");
3312
			goto free_iommu;
3313 3314 3315
		}
	}
	/*
3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
3328
	 */
J
Joerg Roedel 已提交
3329
	pr_info("Setting RMRR:\n");
3330
	for_each_rmrr_units(rmrr) {
3331 3332
		/* some BIOS lists non-exist devices in DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3333
					  i, dev) {
3334
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
3335
			if (ret)
J
Joerg Roedel 已提交
3336
				pr_err("Mapping reserved region failed\n");
3337
		}
F
Fenghua Yu 已提交
3338
	}
3339

3340 3341
	iommu_prepare_isa();

3342 3343
domains_done:

3344 3345 3346 3347 3348 3349 3350
	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
3351
	for_each_iommu(iommu, drhd) {
3352 3353 3354 3355 3356 3357
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
3358
				iommu_disable_protect_mem_regions(iommu);
3359
			continue;
3360
		}
3361 3362 3363

		iommu_flush_write_buffer(iommu);

3364 3365 3366 3367 3368 3369 3370
#ifdef CONFIG_INTEL_IOMMU_SVM
		if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
			ret = intel_svm_enable_prq(iommu);
			if (ret)
				goto free_iommu;
		}
#endif
3371 3372
		ret = dmar_set_interrupt(iommu);
		if (ret)
3373
			goto free_iommu;
3374

3375 3376 3377
		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

3378
		iommu_disable_protect_mem_regions(iommu);
3379 3380 3381
	}

	return 0;
3382 3383

free_iommu:
3384 3385
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
3386
		free_dmar_iommu(iommu);
3387
	}
3388
free_g_iommus:
3389 3390
	for_each_possible_cpu(cpu)
		kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
W
Weidong Han 已提交
3391
	kfree(g_iommus);
3392
error:
3393 3394 3395
	return ret;
}

3396
/* This takes a number of _MM_ pages, not VTD pages */
3397
static unsigned long intel_alloc_iova(struct device *dev,
3398 3399
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
3400
{
3401
	unsigned long iova_pfn = 0;
3402

3403 3404
	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3405 3406
	/* Ensure we reserve the whole size-aligned region */
	nrpages = __roundup_pow_of_two(nrpages);
3407 3408

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3409 3410
		/*
		 * First try to allocate an io virtual address in
3411
		 * DMA_BIT_MASK(32) and if that fails then try allocating
J
Joe Perches 已提交
3412
		 * from higher range
3413
		 */
3414 3415 3416 3417
		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
					   IOVA_PFN(DMA_BIT_MASK(32)));
		if (iova_pfn)
			return iova_pfn;
3418
	}
3419 3420
	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
	if (unlikely(!iova_pfn)) {
J
Joerg Roedel 已提交
3421
		pr_err("Allocating %ld-page iova for %s failed",
3422
		       nrpages, dev_name(dev));
3423
		return 0;
3424 3425
	}

3426
	return iova_pfn;
3427 3428
}

3429
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3430
{
3431
	struct dmar_domain *domain, *tmp;
3432 3433 3434
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i, ret;
3435

3436 3437 3438 3439 3440 3441 3442
	domain = find_domain(dev);
	if (domain)
		goto out;

	domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		goto out;
3443

3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460
	/* We have a new domain - setup possible RMRRs for the device */
	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != dev)
				continue;

			ret = domain_prepare_identity_map(dev, domain,
							  rmrr->base_address,
							  rmrr->end_address);
			if (ret)
				dev_err(dev, "Mapping reserved region failed\n");
		}
	}
	rcu_read_unlock();

3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472
	tmp = set_domain_for_dev(dev, domain);
	if (!tmp || domain != tmp) {
		domain_exit(domain);
		domain = tmp;
	}

out:

	if (!domain)
		pr_err("Allocating domain for %s failed\n", dev_name(dev));


3473 3474 3475
	return domain;
}

3476
static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3477 3478 3479 3480
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
3481
	info = dev->archdata.iommu;
3482 3483 3484 3485 3486 3487
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

3488
/* Check if the dev needs to go through non-identity map and unmap process.*/
3489
static int iommu_no_mapping(struct device *dev)
3490 3491 3492
{
	int found;

3493
	if (iommu_dummy(dev))
3494 3495
		return 1;

3496
	if (!iommu_identity_mapping)
3497
		return 0;
3498

3499
	found = identity_mapping(dev);
3500
	if (found) {
3501
		if (iommu_should_identity_map(dev, 0))
3502 3503 3504 3505 3506 3507
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
3508
			dmar_remove_one_dev_info(si_domain, dev);
J
Joerg Roedel 已提交
3509 3510
			pr_info("32bit %s uses non-identity mapping\n",
				dev_name(dev));
3511 3512 3513 3514 3515 3516 3517
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
3518
		if (iommu_should_identity_map(dev, 0)) {
3519
			int ret;
3520
			ret = domain_add_dev_info(si_domain, dev);
3521
			if (!ret) {
J
Joerg Roedel 已提交
3522 3523
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
3524 3525 3526 3527 3528
				return 1;
			}
		}
	}

3529
	return 0;
3530 3531
}

3532
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3533
				     size_t size, int dir, u64 dma_mask)
3534 3535
{
	struct dmar_domain *domain;
F
Fenghua Yu 已提交
3536
	phys_addr_t start_paddr;
3537
	unsigned long iova_pfn;
3538
	int prot = 0;
I
Ingo Molnar 已提交
3539
	int ret;
3540
	struct intel_iommu *iommu;
3541
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3542 3543

	BUG_ON(dir == DMA_NONE);
3544

3545
	if (iommu_no_mapping(dev))
I
Ingo Molnar 已提交
3546
		return paddr;
3547

3548
	domain = get_valid_domain_for_dev(dev);
3549 3550 3551
	if (!domain)
		return 0;

3552
	iommu = domain_get_iommu(domain);
3553
	size = aligned_nrpages(paddr, size);
3554

3555 3556
	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova_pfn)
3557 3558
		goto error;

3559 3560 3561 3562 3563
	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3564
			!cap_zlr(iommu->cap))
3565 3566 3567 3568
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
I
Ingo Molnar 已提交
3569
	 * paddr - (paddr + size) might be partial page, we should map the whole
3570
	 * page.  Note: if two part of one page are separately mapped, we
I
Ingo Molnar 已提交
3571
	 * might have two guest_addr mapping to the same host paddr, but this
3572 3573
	 * is not a big problem
	 */
3574
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3575
				 mm_to_dma_pfn(paddr_pfn), size, prot);
3576 3577 3578
	if (ret)
		goto error;

3579 3580
	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
3581
		iommu_flush_iotlb_psi(iommu, domain,
3582
				      mm_to_dma_pfn(iova_pfn),
3583
				      size, 0, 1);
3584
	else
3585
		iommu_flush_write_buffer(iommu);
3586

3587
	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3588 3589
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;
3590 3591

error:
3592
	if (iova_pfn)
3593
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
J
Joerg Roedel 已提交
3594
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3595
		dev_name(dev), size, (unsigned long long)paddr, dir);
3596 3597 3598
	return 0;
}

3599 3600 3601
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
3602
				 unsigned long attrs)
3603
{
3604
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
3605
				  dir, *dev->dma_mask);
3606 3607
}

3608
static void flush_unmaps(struct deferred_flush_data *flush_data)
M
mark gross 已提交
3609
{
3610
	int i, j;
M
mark gross 已提交
3611

3612
	flush_data->timer_on = 0;
M
mark gross 已提交
3613 3614 3615

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
3616
		struct intel_iommu *iommu = g_iommus[i];
3617 3618
		struct deferred_flush_table *flush_table =
				&flush_data->tables[i];
3619 3620
		if (!iommu)
			continue;
3621

3622
		if (!flush_table->next)
3623 3624
			continue;

3625 3626 3627
		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Y
Yu Zhao 已提交
3628
					 DMA_TLB_GLOBAL_FLUSH);
3629
		for (j = 0; j < flush_table->next; j++) {
Y
Yu Zhao 已提交
3630
			unsigned long mask;
3631
			struct deferred_flush_entry *entry =
3632
						&flush_table->entries[j];
3633
			unsigned long iova_pfn = entry->iova_pfn;
3634
			unsigned long nrpages = entry->nrpages;
3635 3636
			struct dmar_domain *domain = entry->domain;
			struct page *freelist = entry->freelist;
3637 3638 3639

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
3640
				iommu_flush_iotlb_psi(iommu, domain,
3641
					mm_to_dma_pfn(iova_pfn),
3642
					nrpages, !freelist, 0);
3643
			else {
3644
				mask = ilog2(nrpages);
3645
				iommu_flush_dev_iotlb(domain,
3646
						(uint64_t)iova_pfn << PAGE_SHIFT, mask);
3647
			}
3648
			free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3649 3650
			if (freelist)
				dma_free_pagelist(freelist);
3651
		}
3652
		flush_table->next = 0;
M
mark gross 已提交
3653 3654
	}

3655
	flush_data->size = 0;
M
mark gross 已提交
3656 3657
}

3658
static void flush_unmaps_timeout(unsigned long cpuid)
M
mark gross 已提交
3659
{
3660
	struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3661 3662
	unsigned long flags;

3663 3664 3665
	spin_lock_irqsave(&flush_data->lock, flags);
	flush_unmaps(flush_data);
	spin_unlock_irqrestore(&flush_data->lock, flags);
}

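/*
 * Queue a lazy unmap: the IOVA range and its page-table freelist are
 * released later from flush_unmaps(), either when the 10ms timer fires
 * or when a per-CPU table reaches HIGH_WATER_MARK, so that IOTLB
 * invalidations can be batched.
 */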
static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3669
		      unsigned long nrpages, struct page *freelist)
M
mark gross 已提交
3670 3671
{
	unsigned long flags;
3672
	int entry_id, iommu_id;
3673
	struct intel_iommu *iommu;
3674
	struct deferred_flush_entry *entry;
3675 3676
	struct deferred_flush_data *flush_data;
	unsigned int cpuid;
M
mark gross 已提交
3677

3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692
	cpuid = get_cpu();
	flush_data = per_cpu_ptr(&deferred_flush, cpuid);

	/* Flush all CPUs' entries to avoid deferring too much.  If
	 * this becomes a bottleneck, can just flush us, and rely on
	 * flush timer for the rest.
	 */
	if (flush_data->size == HIGH_WATER_MARK) {
		int cpu;

		for_each_online_cpu(cpu)
			flush_unmaps_timeout(cpu);
	}

	spin_lock_irqsave(&flush_data->lock, flags);
3693

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	entry_id = flush_data->tables[iommu_id].next;
	++(flush_data->tables[iommu_id].next);

	entry = &flush_data->tables[iommu_id].entries[entry_id];
	entry->domain = dom;
	entry->iova_pfn = iova_pfn;
	entry->nrpages = nrpages;
	entry->freelist = freelist;

	if (!flush_data->timer_on) {
		mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
		flush_data->timer_on = 1;
	}
	flush_data->size++;
	spin_unlock_irqrestore(&flush_data->lock, flags);

	put_cpu();
}

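/*
 * Tear down the DMA mapping at [dev_addr, dev_addr + size).  In strict
 * mode the IOTLB flush and IOVA release happen immediately; otherwise
 * the range is handed to the deferred-flush machinery via add_unmap().
 */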
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	unsigned long nrpages;
	unsigned long iova_pfn;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova_pfn = IOVA_PFN(dev_addr);

	nrpages = aligned_nrpages(dev_addr, size);
	start_pfn = mm_to_dma_pfn(iova_pfn);
	last_pfn = start_pfn + nrpages - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
				      nrpages, !freelist, 0);
		/* free iova */
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova_pfn, nrpages, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	intel_unmap(dev, dev_addr, size);
}

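/*
 * Allocate and map a coherent DMA buffer.  Allocation is tried from CMA
 * first when blocking is allowed, with a fallback to alloc_pages(); the
 * GFP zone flags are only restricted by the coherent DMA mask when no
 * IOMMU translation will be used for this device.
 */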
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (gfpflags_allow_blocking(flags)) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}

static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, unsigned long attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle, size);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
	unsigned long nrpages = 0;
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i) {
		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
	}

	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

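/*
 * Map a scatterlist: reserve one IOVA range big enough for all segments,
 * map them with domain_sg_mapping() and then flush the IOTLB (caching
 * mode) or the write buffer, since this is a non-present to present
 * change.
 */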
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	unsigned long iova_pfn;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova_pfn) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova_pfn);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		pr_err("Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		pr_err("Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iova_cache_get();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	iova_cache_put();

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	iova_cache_put();
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

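/*
 * Decide which DMAR units can be skipped: units with an empty device
 * scope are ignored outright, and units that cover only graphics
 * devices are either ignored or accounted via intel_iommu_gfx_mapped,
 * depending on dmar_map_gfx.
 */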
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}
	
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

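/*
 * Suspend handler: flush all context/IOTLB caches, disable translation
 * and save each IOMMU's fault-event registers so iommu_resume() can
 * restore them.
 */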
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_PM */


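/*
 * Parse one RMRR entry from the DMAR table and add it, together with
 * its device scope, to dmar_rmrr_units.
 */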
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;
	}

	return 0;
}

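/*
 * Bring a (possibly hot-added) DMAR unit online: check that it supports
 * the capabilities the current configuration relies on (pass-through,
 * snooping, super pages), set up its domain and root-entry state and,
 * unless the unit is ignored, enable queued invalidation, interrupts
 * and translation.
 */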
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_enabled(iommu))
		intel_svm_alloc_pasid_tables(iommu);
#endif

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
		ret = intel_svm_enable_prq(iommu);
		if (ret)
			goto disable_iommu;
	}
#endif
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		/* If it's an integrated device, allow ATS */
		if (!bridge)
			return 1;
		/* Connected via non-PCIe: no ATS */
		if (!pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		/* If we found the root port, look it up in the ATSR */
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	dmar_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

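/*
 * Memory hotplug notifier for the identity-mapped si_domain: newly
 * onlined ranges get an identity mapping; ranges going offline have
 * their IOVAs, page tables and IOTLB entries torn down.
 */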
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
					       iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

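/*
 * Free the per-CPU cached IOVAs of every domain on every IOMMU for the
 * given CPU; called from the CPU_DEAD notifier below.
 */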
static void free_all_cpu_cached_iovas(unsigned int cpu)
{
	int i;

	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		struct dmar_domain *domain;
		int did;

		if (!iommu)
			continue;

		for (did = 0; did < cap_ndoms(iommu->cap); did++) {
			domain = get_iommu_domain(iommu, (u16)did);

			if (!domain)
				continue;
			free_cpu_cached_iovas(cpu, &domain->iovad);
		}
	}
}

static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	unsigned int cpu = (unsigned long)v;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_all_cpu_cached_iovas(cpu);
		flush_unmaps_timeout(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_cpu_nb = {
	.notifier_call = intel_iommu_cpu_notifier,
};

static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static ssize_t intel_iommu_show_ndoms(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
}
static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);

static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
						  cap_ndoms(iommu->cap)));
}
static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	&dev_attr_domains_supported.attr,
	&dev_attr_domains_used.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

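/*
 * Main initialization entry point: parse the DMAR table and device
 * scopes, set up reserved ranges and all IOMMUs, install intel_dma_ops
 * as the DMA API backend and register the sysfs entries plus the bus,
 * memory-hotplug and CPU notifiers.
 */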
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);
	register_hotcpu_notifier(&intel_iommu_cpu_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}

static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
	struct intel_iommu *iommu;
	unsigned long flags;

	assert_spin_locked(&device_domain_lock);

	if (WARN_ON(!info))
		return;

	iommu = info->iommu;

	if (info->dev) {
		iommu_disable_dev_iotlb(info);
		domain_context_clear(iommu, info->dev);
	}

	unlink_domain_info(info);

	spin_lock_irqsave(&iommu->lock, flags);
	domain_detach_iommu(info->domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	free_devinfo_mem(info);
}

static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

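/*
 * Initialize a domain allocated through the IOMMU API: set up its IOVA
 * allocator and reserved ranges, derive the AGAW from the requested
 * guest width and allocate the top-level page directory.
 */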
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

4982 4983
	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
4984 4985
		struct dmar_domain *old_domain;

4986
		old_domain = find_domain(dev);
4987
		if (old_domain) {
4988
			rcu_read_lock();
4989
			dmar_remove_one_dev_info(old_domain, dev);
4990
			rcu_read_unlock();
4991 4992 4993 4994

			if (!domain_type_is_vm_or_si(old_domain) &&
			     list_empty(&old_domain->devices))
				domain_exit(old_domain);
4995 4996 4997
		}
	}

4998
	iommu = device_to_iommu(dev, &bus, &devfn);
4999 5000 5001 5002 5003
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
5004 5005 5006 5007
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
J
Joerg Roedel 已提交
5008
		pr_err("%s: iommu width (%d) is not "
5009
		       "sufficient for the mapped address (%llx)\n",
5010
		       __func__, addr_width, dmar_domain->max_addr);
5011 5012
		return -EFAULT;
	}
5013 5014 5015 5016 5017 5018 5019 5020 5021 5022
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
5023 5024
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
5025
			free_pgtable_page(pte);
5026 5027 5028
		}
		dmar_domain->agaw--;
	}
5029

5030
	return domain_add_dev_info(dmar_domain, dev);
K
Kay, Allen M 已提交
5031 5032
}

5033 5034
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
K
Kay, Allen M 已提交
5035
{
5036
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5037
}
5038

5039 5040
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
5041
			   size_t size, int iommu_prot)
5042
{
5043
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5044
	u64 max_addr;
5045
	int prot = 0;
5046
	int ret;
5047

5048 5049 5050 5051
	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
5052 5053
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;
5054

5055
	max_addr = iova + size;
5056
	if (dmar_domain->max_addr < max_addr) {
5057 5058 5059
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
5060
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5061
		if (end < max_addr) {
J
Joerg Roedel 已提交
5062
			pr_err("%s: iommu width (%d) is not "
5063
			       "sufficient for the mapped address (%llx)\n",
5064
			       __func__, dmar_domain->gaw, max_addr);
5065 5066
			return -EFAULT;
		}
5067
		dmar_domain->max_addr = max_addr;
5068
	}
5069 5070
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
5071
	size = aligned_nrpages(hpa, size);
5072 5073
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
5074
	return ret;
K
Kay, Allen M 已提交
5075 5076
}

5077
static size_t intel_iommu_unmap(struct iommu_domain *domain,
5078
				unsigned long iova, size_t size)
K
Kay, Allen M 已提交
5079
{
5080
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5081 5082 5083 5084
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
5085
	int iommu_id, level = 0;
5086 5087 5088

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
5089
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5090 5091 5092

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5093

5094 5095 5096 5097 5098 5099 5100
	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

5101
	for_each_domain_iommu(iommu_id, dmar_domain) {
5102
		iommu = g_iommus[iommu_id];
5103

5104 5105
		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, npages, !freelist, 0);
5106 5107 5108
	}

	dma_free_pagelist(freelist);
5109

5110 5111
	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
5112

5113
	return size;
K
Kay, Allen M 已提交
5114 5115
}

5116
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5117
					    dma_addr_t iova)
K
Kay, Allen M 已提交
5118
{
5119
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
K
Kay, Allen M 已提交
5120
	struct dma_pte *pte;
5121
	int level = 0;
5122
	u64 phys = 0;
K
Kay, Allen M 已提交
5123

5124
	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
K
Kay, Allen M 已提交
5125
	if (pte)
5126
		phys = dma_pte_addr(pte);
K
Kay, Allen M 已提交
5127

5128
	return phys;
K
Kay, Allen M 已提交
5129
}
5130

5131
static bool intel_iommu_capable(enum iommu_cap cap)
S
Sheng Yang 已提交
5132 5133
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
5134
		return domain_update_iommu_snooping(NULL) == 1;
5135
	if (cap == IOMMU_CAP_INTR_REMAP)
5136
		return irq_remapping_enabled == 1;
S
Sheng Yang 已提交
5137

5138
	return false;
S
Sheng Yang 已提交
5139 5140
}

5141 5142
static int intel_iommu_add_device(struct device *dev)
{
5143
	struct intel_iommu *iommu;
5144
	struct iommu_group *group;
5145
	u8 bus, devfn;
5146

5147 5148
	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
5149 5150
		return -ENODEV;

5151
	iommu_device_link(iommu->iommu_dev, dev);
5152

5153
	group = iommu_group_get_for_dev(dev);
5154

5155 5156
	if (IS_ERR(group))
		return PTR_ERR(group);
5157

5158
	iommu_group_put(group);
5159
	return 0;
5160
}
5161

5162 5163
static void intel_iommu_remove_device(struct device *dev)
{
5164 5165 5166 5167 5168 5169 5170
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

5171
	iommu_group_remove_device(dev);
5172 5173

	iommu_device_unlink(iommu->iommu_dev, dev);
5174 5175
}

5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227
#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked. Which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
5228 5229
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
5230 5231
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.device_group   = pci_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}