/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
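
/*
 * Worked example (illustrative): with the default 48-bit guest address
 * width and VTD_PAGE_SHIFT == 12, __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1,
 * i.e. a domain spans 2^36 4KiB pages (256TiB of IOVA space).  On a 32-bit
 * kernel the min_t() against (unsigned long)-1 clamps DOMAIN_MAX_PFN so
 * that PFNs still fit in an unsigned long.
 */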

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
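
/*
 * Example (illustrative): ~0xFFFUL leaves every bit from 12 upwards set, so
 * the IOMMU core sees 4KiB, 8KiB, 16KiB, ... as supported and never has to
 * split a naturally aligned, power-of-two sized region before handing it to
 * this driver.
 */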

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
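
/*
 * Worked example (illustrative): agaw 2 selects a four-level page table
 * (agaw_to_level(2) == 4) covering agaw_to_width(2) == 30 + 2*9 == 48 bits.
 * For a level-2 entry, level_to_offset_bits(2) == 9, so pfn_level_offset()
 * extracts bits 9-17 of the DMA PFN and level_size(2) == 512 pages (2MiB
 * with 4KiB pages).
 */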

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
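
/*
 * Example (illustrative): on x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * conversions above are the identity.  On an architecture with 64KiB MM
 * pages they would shift by 4, e.g. mm_to_dma_pfn(1) == 16, because one MM
 * page covers sixteen 4KiB VT-d pages -- which is why VT-d pages must never
 * be larger than MM pages.
 */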

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
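
/*
 * Worked example (illustrative value): a root entry with val ==
 * 0x00000001fe000001 is present (bit 0) and points at a context-entry table
 * whose physical address is 0x1fe000000; get_context_addr_from_root() masks
 * off the low bits with VTD_PAGE_MASK and returns the table's virtual
 * address via phys_to_virt().
 */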

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
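
/*
 * Usage sketch (illustrative; mirrors domain_context_mapping_one() below):
 * a context entry for an ordinary device is programmed roughly as
 *
 *	context_set_domain_id(context, domain->id);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * i.e. the low word carries the present/fault/type bits plus the page-table
 * root, and the high word carries the address width and the domain id.
 */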

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
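
/*
 * Example (illustrative value): a leaf PTE of 0x000000012345f003 maps host
 * physical page 0x12345f000 with DMA_PTE_READ and DMA_PTE_WRITE set (bits 0
 * and 1), so dma_pte_present() is true; bit 7 being clear makes it a normal
 * 4KiB mapping rather than a superpage.
 */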

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	rcu_read_unlock();

	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct page *freelist = NULL;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0)
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(bool vm)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	domain->iommu_count = 0;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (vm) {
		domain->id = atomic_inc_return(&vm_domid);
		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
	}

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	domain->iommu_count++;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			clear_bit(num, iommu->domain_ids);
			iommu->domains[num] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
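
/*
 * Worked example (illustrative): a guest width of 40 bits does not land on a
 * page-table level boundary ((40 - 12) % 9 == 1), so it is rounded up to the
 * next one, 40 + 9 - 1 == 48 bits; a width of 48 is already aligned
 * ((48 - 12) % 9 == 0) and is returned unchanged.
 */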

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
		    test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1835
		domain->iommu_count++;
1836 1837
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
1838
		domain_update_iommu_cap(domain);
1839 1840
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
1841 1842 1843
	return 0;
}

1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

1860
static int
1861 1862
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
1863
{
1864
	struct intel_iommu *iommu;
1865
	u8 bus, devfn;
1866
	struct domain_context_mapping_data data;
1867

1868
	iommu = device_to_iommu(dev, &bus, &devfn);
1869 1870
	if (!iommu)
		return -ENODEV;
1871

1872 1873
	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
F
Fenghua Yu 已提交
1874
						  translation);
1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1890 1891
}

1892
static int domain_context_mapped(struct device *dev)
1893
{
W
Weidong Han 已提交
1894
	struct intel_iommu *iommu;
1895
	u8 bus, devfn;
W
Weidong Han 已提交
1896

1897
	iommu = device_to_iommu(dev, &bus, &devfn);
W
Weidong Han 已提交
1898 1899
	if (!iommu)
		return -ENODEV;
1900

1901 1902
	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);
1903

1904 1905
	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
1906 1907
}

1908 1909 1910 1911 1912 1913 1914 1915
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

1944 1945 1946
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
1947 1948
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
1949
	phys_addr_t uninitialized_var(pteval);
1950
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1951
	unsigned long sg_res;
1952 1953
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;
1954 1955 1956 1957 1958 1959 1960 1961

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

1962 1963 1964 1965 1966 1967 1968
	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

1969
	while (nr_pages > 0) {
1970 1971
		uint64_t tmp;

1972
		if (!sg_res) {
1973
			sg_res = aligned_nrpages(sg->offset, sg->length);
1974 1975 1976
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
1977
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
1978
		}
1979

1980
		if (!pte) {
1981 1982
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

1983
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
1984 1985
			if (!pte)
				return -ENOMEM;
1986
			/* It is large page*/
1987
			if (largepage_lvl > 1) {
1988
				pteval |= DMA_PTE_LARGE_PAGE;
1989 1990 1991 1992 1993 1994 1995
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
1996
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1997
			}
1998

1999 2000 2001 2002
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
2003
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2004
		if (tmp) {
2005
			static int dumps = 5;
2006 2007
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
2008 2009 2010 2011 2012 2013
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
2037
		pte++;
2038 2039
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
2040 2041 2042 2043
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
2044 2045

		if (!sg_res && nr_pages)
2046 2047 2048 2049 2050
			sg = sg_next(sg);
	}
	return 0;
}

2051 2052 2053
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
2054
{
2055 2056
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
2057

2058 2059 2060 2061 2062
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2063 2064
}

2065
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2066
{
2067 2068
	if (!iommu)
		return;
2069 2070 2071

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
2072
					   DMA_CCMD_GLOBAL_INVL);
2073
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2074 2075
}

2076 2077 2078 2079 2080 2081
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
2082
		info->dev->archdata.iommu = NULL;
2083 2084
}

2085 2086 2087
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
2088
	unsigned long flags, flags2;
2089 2090 2091 2092 2093

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
2094
		unlink_domain_info(info);
2095 2096
		spin_unlock_irqrestore(&device_domain_lock, flags);

Y
Yu Zhao 已提交
2097
		iommu_disable_dev_iotlb(info);
2098
		iommu_detach_dev(info->iommu, info->bus, info->devfn);
2099

2100
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
2101
			iommu_detach_dependent_devices(info->iommu, info->dev);
2102 2103 2104 2105
			/* clear this iommu in iommu_bmp, update iommu count
			 * and capabilities
			 */
			spin_lock_irqsave(&domain->iommu_lock, flags2);
2106
			if (test_and_clear_bit(info->iommu->seq_id,
2107 2108 2109 2110 2111 2112 2113 2114
					       domain->iommu_bmp)) {
				domain->iommu_count--;
				domain_update_iommu_cap(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
		}

		free_devinfo_mem(info);
2115 2116 2117 2118 2119 2120 2121
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
2122
 * Note: we use struct device->archdata.iommu stores the info
2123
 */
2124
static struct dmar_domain *find_domain(struct device *dev)
2125 2126 2127 2128
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
2129
	info = dev->archdata.iommu;
2130 2131 2132 2133 2134
	if (info)
		return info->domain;
	return NULL;
}

2135
static inline struct device_domain_info *
2136 2137 2138 2139 2140
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
2141
		if (info->iommu->segment == segment && info->bus == bus &&
2142
		    info->devfn == devfn)
2143
			return info;
2144 2145 2146 2147

	return NULL;
}

2148
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2149
						int bus, int devfn,
2150 2151
						struct device *dev,
						struct dmar_domain *domain)
2152
{
2153
	struct dmar_domain *found = NULL;
2154 2155 2156 2157 2158
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
2159
		return NULL;
2160 2161 2162 2163 2164

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
2165
	info->iommu = iommu;
2166 2167 2168 2169 2170
	if (!dev)
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
2171
		found = find_domain(dev);
2172 2173
	else {
		struct device_domain_info *info2;
2174
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2175 2176 2177
		if (info2)
			found = info2->domain;
	}
2178 2179 2180
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
2181 2182
		/* Caller must free the original domain */
		return found;
2183 2184
	}

2185 2186 2187 2188 2189 2190 2191
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
2192 2193
}

2194 2195 2196 2197 2198 2199
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

2200
/* domain is initialized */
2201
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2202
{
2203 2204
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
2205
	struct device_domain_info *info;
2206
	u16 dma_alias;
2207
	unsigned long flags;
2208
	u8 bus, devfn;
2209

2210
	domain = find_domain(dev);
2211 2212 2213
	if (domain)
		return domain;

2214 2215 2216 2217
	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

2218 2219
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
2220

2221 2222 2223 2224 2225 2226 2227 2228 2229
		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
2230
		}
2231
		spin_unlock_irqrestore(&device_domain_lock, flags);
2232

2233 2234 2235 2236
		/* DMA alias already has a domain, uses it */
		if (info)
			goto found_domain;
	}
2237

2238
	/* Allocate and initialize new domain for the device */
2239
	domain = alloc_domain(false);
2240
	if (!domain)
2241 2242
		return NULL;

2243
	if (iommu_attach_domain(domain, iommu)) {
2244
		free_domain_mem(domain);
2245 2246 2247 2248 2249 2250
		return NULL;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
2251
	}
2252

2253 2254 2255 2256 2257 2258 2259 2260 2261 2262
	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

2263
		if (!domain)
2264
			return NULL;
2265 2266 2267
	}

found_domain:
2268 2269 2270 2271 2272 2273
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}
2274 2275

	return domain;
2276 2277
}

2278
static int iommu_identity_mapping;
2279 2280 2281
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
2282

2283 2284 2285
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
2286
{
2287 2288 2289 2290 2291
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
2292
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
2293
		return -ENOMEM;
2294 2295
	}

2296 2297
	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
2298 2299 2300 2301
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
2302
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2303

2304 2305
	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
2306
				  DMA_PTE_READ|DMA_PTE_WRITE);
2307 2308
}

2309
static int iommu_prepare_identity_map(struct device *dev,
2310 2311 2312 2313 2314 2315
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

2316
	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2317 2318 2319
	if (!domain)
		return -ENOMEM;

2320 2321 2322 2323 2324 2325
	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2326
		       dev_name(dev), start, end);
2327 2328 2329 2330 2331
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2332
	       dev_name(dev), start, end);
2333
	
2334 2335 2336 2337 2338 2339 2340 2341 2342 2343
	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

2344 2345 2346 2347 2348 2349 2350 2351 2352 2353
	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}
2354

2355
	ret = iommu_domain_identity_map(domain, start, end);
2356 2357 2358 2359
	if (ret)
		goto error;

	/* context entry init */
2360
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2361 2362 2363 2364 2365 2366
	if (ret)
		goto error;

	return 0;

 error:
2367 2368 2369 2370 2371
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2372
					 struct device *dev)
2373
{
2374
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2375
		return 0;
2376 2377
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
2378 2379
}

2380
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2381 2382 2383 2384 2385 2386 2387 2388 2389
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

2390
	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2391
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2392 2393

	if (ret)
2394 2395
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
2396 2397 2398 2399 2400 2401 2402

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
2403
#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
2404

2405
static int md_domain_init(struct dmar_domain *domain, int guest_width);
2406

2407
static int __init si_domain_init(int hw)
2408 2409 2410
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
2411
	int nid, ret = 0;
2412

2413
	si_domain = alloc_domain(false);
2414 2415 2416
	if (!si_domain)
		return -EFAULT;

2417 2418
	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431
	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

2432 2433
	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);
2434

2435 2436 2437
	if (hw)
		return 0;

2438
	for_each_online_node(nid) {
2439 2440 2441 2442 2443 2444 2445 2446 2447
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
2448 2449
	}

2450 2451 2452
	return 0;
}

2453
static int identity_mapping(struct device *dev)
2454 2455 2456 2457 2458 2459
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

2460
	info = dev->archdata.iommu;
2461 2462
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);
2463 2464 2465 2466 2467

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
2468
			       struct device *dev, int translation)
2469
{
2470
	struct dmar_domain *ndomain;
2471
	struct intel_iommu *iommu;
2472
	u8 bus, devfn;
2473
	int ret;
2474

2475
	iommu = device_to_iommu(dev, &bus, &devfn);
2476 2477 2478
	if (!iommu)
		return -ENODEV;

2479
	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2480 2481
	if (ndomain != domain)
		return -EBUSY;
2482

2483
	ret = domain_context_mapping(domain, dev, translation);
2484
	if (ret) {
2485
		domain_remove_one_dev_info(domain, dev);
2486 2487 2488
		return ret;
	}

2489 2490 2491
	return 0;
}

2492
static bool device_has_rmrr(struct device *dev)
2493 2494
{
	struct dmar_rmrr_unit *rmrr;
2495
	struct device *tmp;
2496 2497
	int i;

2498
	rcu_read_lock();
2499
	for_each_rmrr_units(rmrr) {
2500 2501 2502 2503 2504 2505
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
2506
			if (tmp == dev) {
2507
				rcu_read_unlock();
2508
				return true;
2509
			}
2510
	}
2511
	rcu_read_unlock();
2512 2513 2514
	return false;
}

2515
static int iommu_should_identity_map(struct device *dev, int startup)
2516
{
2517

2518 2519
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
2520

2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531
		/*
		 * We want to prevent any device associated with an RMRR from
		 * getting placed into the SI Domain. This is done because
		 * problems exist when devices are moved in and out of domains
		 * and their respective RMRR info is lost. We exempt USB devices
		 * from this process due to their usage of RMRRs that are known
		 * to not be needed after BIOS hand-off to OS.
		 */
		if (device_has_rmrr(dev) &&
		    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
			return 0;
2532

2533 2534
		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;
2535

2536 2537
		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;
2538

2539
		if (!(iommu_identity_mapping & IDENTMAP_ALL))
2540
			return 0;
2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2565
			return 0;
2566 2567 2568 2569
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}
2570

2571
	/*
2572
	 * At boot time, we don't yet know if devices will be 64-bit capable.
2573
	 * Assume that they will — if they turn out not to be, then we can
2574 2575
	 * take them out of the 1:1 domain later.
	 */
2576 2577 2578 2579 2580
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
2581
		u64 dma_mask = *dev->dma_mask;
2582

2583 2584 2585
		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;
2586

2587
		return dma_mask >= dma_get_required_mask(dev);
2588
	}
2589 2590 2591 2592

	return 1;
}

2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("IOMMU: %s identity mapping for device %s\n",
			hw ? "hardware" : "software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}


2614
static int __init iommu_prepare_static_identity_mapping(int hw)
2615 2616
{
	struct pci_dev *pdev = NULL;
2617 2618 2619 2620 2621
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;
2622

2623
	ret = si_domain_init(hw);
2624 2625 2626 2627
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;
				
			adev= to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
2647
			}
2648 2649 2650
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
2651
		}
2652 2653 2654 2655

	return 0;
}

2656
static int __init init_dmars(void)
2657 2658 2659
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
2660
	struct device *dev;
2661
	struct intel_iommu *iommu;
2662
	int i, ret;
2663

2664 2665 2666 2667 2668 2669 2670
	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
M
mark gross 已提交
2671 2672 2673 2674 2675
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
2676 2677 2678 2679 2680 2681
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			  IOMMU_UNITS_SUPPORTED);
M
mark gross 已提交
2682 2683
	}

W
Weidong Han 已提交
2684 2685 2686 2687 2688 2689 2690 2691
	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

2692 2693 2694
	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
M
mark gross 已提交
2695
		ret = -ENOMEM;
2696
		goto free_g_iommus;
M
mark gross 已提交
2697 2698
	}

2699
	for_each_active_iommu(iommu, drhd) {
W
Weidong Han 已提交
2700
		g_iommus[iommu->seq_id] = iommu;
2701

2702 2703
		ret = iommu_init_domains(iommu);
		if (ret)
2704
			goto free_iommu;
2705

2706 2707 2708
		/*
		 * TBD:
		 * we could share the same root & context tables
L
Lucas De Marchi 已提交
2709
		 * among all IOMMU's. Need to Split it later.
2710 2711 2712 2713
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2714
			goto free_iommu;
2715
		}
F
Fenghua Yu 已提交
2716
		if (!ecap_pass_through(iommu->ecap))
2717
			hw_pass_through = 0;
2718 2719
	}

2720 2721 2722
	/*
	 * Start from the sane iommu hardware state.
	 */
2723
	for_each_active_iommu(iommu, drhd) {
2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

2743
	for_each_active_iommu(iommu, drhd) {
2744 2745 2746 2747 2748 2749 2750
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Y
Yinghai Lu 已提交
2751
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2752
			       "invalidation\n",
Y
Yinghai Lu 已提交
2753
				iommu->seq_id,
2754
			       (unsigned long long)drhd->reg_base_addr);
2755 2756 2757
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
Y
Yinghai Lu 已提交
2758
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2759
			       "invalidation\n",
Y
Yinghai Lu 已提交
2760
				iommu->seq_id,
2761
			       (unsigned long long)drhd->reg_base_addr);
2762 2763 2764
		}
	}

2765
	if (iommu_pass_through)
2766 2767
		iommu_identity_mapping |= IDENTMAP_ALL;

2768
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2769
	iommu_identity_mapping |= IDENTMAP_GFX;
2770
#endif
2771 2772 2773

	check_tylersburg_isoch();

2774
	/*
2775 2776 2777
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
2778
	 */
2779 2780
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
F
Fenghua Yu 已提交
2781
		if (ret) {
2782
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2783
			goto free_iommu;
2784 2785 2786
		}
	}
	/*
2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
2799
	 */
2800 2801
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
2802 2803
		/* some BIOS lists non-exist devices in DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2804
					  i, dev) {
2805
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
2806 2807 2808
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
2809
		}
F
Fenghua Yu 已提交
2810
	}
2811

2812 2813
	iommu_prepare_isa();

2814 2815 2816 2817 2818 2819 2820
	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
2821
	for_each_iommu(iommu, drhd) {
2822 2823 2824 2825 2826 2827
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
2828
				iommu_disable_protect_mem_regions(iommu);
2829
			continue;
2830
		}
2831 2832 2833

		iommu_flush_write_buffer(iommu);

2834 2835
		ret = dmar_set_interrupt(iommu);
		if (ret)
2836
			goto free_iommu;
2837

2838 2839
		iommu_set_root_entry(iommu);

2840
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2841
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
M
mark gross 已提交
2842

2843 2844
		ret = iommu_enable_translation(iommu);
		if (ret)
2845
			goto free_iommu;
2846 2847

		iommu_disable_protect_mem_regions(iommu);
2848 2849 2850
	}

	return 0;
2851 2852

free_iommu:
2853
	for_each_active_iommu(iommu, drhd)
2854
		free_dmar_iommu(iommu);
2855
	kfree(deferred_flush);
2856
free_g_iommus:
W
Weidong Han 已提交
2857
	kfree(g_iommus);
2858
error:
2859 2860 2861
	return ret;
}

2862
/* This takes a number of _MM_ pages, not VTD pages */
2863 2864 2865
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
2866 2867 2868
{
	struct iova *iova = NULL;

2869 2870 2871 2872
	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2873 2874
		/*
		 * First try to allocate an io virtual address in
2875
		 * DMA_BIT_MASK(32) and if that fails then try allocating
J
Joe Perches 已提交
2876
		 * from higher range
2877
		 */
2878 2879 2880 2881 2882 2883 2884 2885
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2886
		       nrpages, dev_name(dev));
2887 2888 2889 2890 2891 2892
		return NULL;
	}

	return iova;
}

2893
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2894 2895 2896 2897
{
	struct dmar_domain *domain;
	int ret;

2898
	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2899
	if (!domain) {
2900 2901
		printk(KERN_ERR "Allocating domain for %s failed",
		       dev_name(dev));
A
Al Viro 已提交
2902
		return NULL;
2903 2904 2905
	}

	/* make sure context mapping is ok */
2906 2907
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2908
		if (ret) {
2909 2910
			printk(KERN_ERR "Domain context map for %s failed",
			       dev_name(dev));
A
Al Viro 已提交
2911
			return NULL;
2912
		}
2913 2914
	}

2915 2916 2917
	return domain;
}

2918
static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2919 2920 2921 2922
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
2923
	info = dev->archdata.iommu;
2924 2925 2926 2927 2928 2929
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

2930
static int iommu_dummy(struct device *dev)
2931
{
2932
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2933 2934
}

2935
/* Check if the dev needs to go through non-identity map and unmap process.*/
2936
static int iommu_no_mapping(struct device *dev)
2937 2938 2939
{
	int found;

2940
	if (iommu_dummy(dev))
2941 2942
		return 1;

2943
	if (!iommu_identity_mapping)
2944
		return 0;
2945

2946
	found = identity_mapping(dev);
2947
	if (found) {
2948
		if (iommu_should_identity_map(dev, 0))
2949 2950 2951 2952 2953 2954
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
2955
			domain_remove_one_dev_info(si_domain, dev);
2956
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2957
			       dev_name(dev));
2958 2959 2960 2961 2962 2963 2964
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
2965
		if (iommu_should_identity_map(dev, 0)) {
2966
			int ret;
2967
			ret = domain_add_dev_info(si_domain, dev,
2968 2969 2970
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
2971 2972
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
2973
				       dev_name(dev));
2974 2975 2976 2977 2978
				return 1;
			}
		}
	}

2979
	return 0;
2980 2981
}

2982
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
2983
				     size_t size, int dir, u64 dma_mask)
2984 2985
{
	struct dmar_domain *domain;
F
Fenghua Yu 已提交
2986
	phys_addr_t start_paddr;
2987 2988
	struct iova *iova;
	int prot = 0;
I
Ingo Molnar 已提交
2989
	int ret;
2990
	struct intel_iommu *iommu;
2991
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2992 2993

	BUG_ON(dir == DMA_NONE);
2994

2995
	if (iommu_no_mapping(dev))
I
Ingo Molnar 已提交
2996
		return paddr;
2997

2998
	domain = get_valid_domain_for_dev(dev);
2999 3000 3001
	if (!domain)
		return 0;

3002
	iommu = domain_get_iommu(domain);
3003
	size = aligned_nrpages(paddr, size);
3004

3005
	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3006 3007 3008
	if (!iova)
		goto error;

3009 3010 3011 3012 3013
	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3014
			!cap_zlr(iommu->cap))
3015 3016 3017 3018
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
I
Ingo Molnar 已提交
3019
	 * paddr - (paddr + size) might be partial page, we should map the whole
3020
	 * page.  Note: if two part of one page are separately mapped, we
I
Ingo Molnar 已提交
3021
	 * might have two guest_addr mapping to the same host paddr, but this
3022 3023
	 * is not a big problem
	 */
3024
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3025
				 mm_to_dma_pfn(paddr_pfn), size, prot);
3026 3027 3028
	if (ret)
		goto error;

3029 3030
	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
3031
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3032
	else
3033
		iommu_flush_write_buffer(iommu);
3034

3035 3036 3037
	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;
3038 3039

error:
3040 3041
	if (iova)
		__free_iova(&domain->iovad, iova);
3042
	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
3043
		dev_name(dev), size, (unsigned long long)paddr, dir);
3044 3045 3046
	return 0;
}

3047 3048 3049 3050
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
3051
{
3052
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
3053
				  dir, *dev->dma_mask);
3054 3055
}

M
mark gross 已提交
3056 3057
static void flush_unmaps(void)
{
3058
	int i, j;
M
mark gross 已提交
3059 3060 3061 3062 3063

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
3064 3065 3066
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;
3067

3068 3069 3070
		if (!deferred_flush[i].next)
			continue;

3071 3072 3073
		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Y
Yu Zhao 已提交
3074
					 DMA_TLB_GLOBAL_FLUSH);
3075
		for (j = 0; j < deferred_flush[i].next; j++) {
Y
Yu Zhao 已提交
3076 3077
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
3078 3079 3080 3081 3082
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
3083 3084
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
					!deferred_flush[i].freelist[j], 0);
3085 3086 3087 3088 3089
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
Y
Yu Zhao 已提交
3090
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3091 3092
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
3093
		}
3094
		deferred_flush[i].next = 0;
M
mark gross 已提交
3095 3096 3097 3098 3099 3100 3101
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
3102 3103 3104
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
M
mark gross 已提交
3105
	flush_unmaps();
3106
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
M
mark gross 已提交
3107 3108
}

3109
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
M
mark gross 已提交
3110 3111
{
	unsigned long flags;
3112
	int next, iommu_id;
3113
	struct intel_iommu *iommu;
M
mark gross 已提交
3114 3115

	spin_lock_irqsave(&async_umap_flush_lock, flags);
3116 3117 3118
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

3119 3120
	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;
3121

3122 3123 3124
	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
3125
	deferred_flush[iommu_id].freelist[next] = freelist;
3126
	deferred_flush[iommu_id].next++;
M
mark gross 已提交
3127 3128 3129 3130 3131 3132 3133 3134 3135

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

3136 3137 3138
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
3139
{
3140
	struct dmar_domain *domain;
3141
	unsigned long start_pfn, last_pfn;
3142
	struct iova *iova;
3143
	struct intel_iommu *iommu;
3144
	struct page *freelist;
3145

3146
	if (iommu_no_mapping(dev))
3147
		return;
3148

3149
	domain = find_domain(dev);
3150 3151
	BUG_ON(!domain);

3152 3153
	iommu = domain_get_iommu(domain);

3154
	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3155 3156
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
3157 3158
		return;

3159 3160
	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3161

3162
	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3163
		 dev_name(dev), start_pfn, last_pfn);
3164

3165
	freelist = domain_unmap(domain, start_pfn, last_pfn);
3166

M
mark gross 已提交
3167
	if (intel_iommu_strict) {
3168
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3169
				      last_pfn - start_pfn + 1, !freelist, 0);
M
mark gross 已提交
3170 3171
		/* free iova */
		__free_iova(&domain->iovad, iova);
3172
		dma_free_pagelist(freelist);
M
mark gross 已提交
3173
	} else {
3174
		add_unmap(domain, iova, freelist);
M
mark gross 已提交
3175 3176 3177 3178 3179
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
3180 3181
}

3182
static void *intel_alloc_coherent(struct device *dev, size_t size,
3183 3184
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
3185
{
A
Akinobu Mita 已提交
3186
	struct page *page = NULL;
3187 3188
	int order;

F
Fenghua Yu 已提交
3189
	size = PAGE_ALIGN(size);
3190
	order = get_order(size);
3191

3192
	if (!iommu_no_mapping(dev))
3193
		flags &= ~(GFP_DMA | GFP_DMA32);
3194 3195
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3196 3197 3198 3199
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}
3200

A
Akinobu Mita 已提交
3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214
	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
3215
		return NULL;
A
Akinobu Mita 已提交
3216
	memset(page_address(page), 0, size);
3217

A
Akinobu Mita 已提交
3218
	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3219
					 DMA_BIDIRECTIONAL,
3220
					 dev->coherent_dma_mask);
3221
	if (*dma_handle)
A
Akinobu Mita 已提交
3222 3223 3224 3225
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

3226 3227 3228
	return NULL;
}

3229
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3230
				dma_addr_t dma_handle, struct dma_attrs *attrs)
3231 3232
{
	int order;
A
Akinobu Mita 已提交
3233
	struct page *page = virt_to_page(vaddr);
3234

F
Fenghua Yu 已提交
3235
	size = PAGE_ALIGN(size);
3236 3237
	order = get_order(size);

3238
	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
A
Akinobu Mita 已提交
3239 3240
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
3241 3242
}

3243
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3244 3245
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
3246 3247
{
	struct dmar_domain *domain;
3248
	unsigned long start_pfn, last_pfn;
3249
	struct iova *iova;
3250
	struct intel_iommu *iommu;
3251
	struct page *freelist;
3252

3253
	if (iommu_no_mapping(dev))
3254 3255
		return;

3256
	domain = find_domain(dev);
3257 3258 3259
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);
3260

F
FUJITA Tomonori 已提交
3261
	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3262 3263
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
3264 3265
		return;

3266 3267
	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3268

3269
	freelist = domain_unmap(domain, start_pfn, last_pfn);
3270

3271 3272
	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3273
				      last_pfn - start_pfn + 1, !freelist, 0);
3274 3275
		/* free iova */
		__free_iova(&domain->iovad, iova);
3276
		dma_free_pagelist(freelist);
3277
	} else {
3278
		add_unmap(domain, iova, freelist);
3279 3280 3281 3282 3283
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
3284 3285 3286
}

static int intel_nontranslate_map_sg(struct device *hddev,
F
FUJITA Tomonori 已提交
3287
	struct scatterlist *sglist, int nelems, int dir)
3288 3289
{
	int i;
F
FUJITA Tomonori 已提交
3290
	struct scatterlist *sg;
3291

F
FUJITA Tomonori 已提交
3292
	for_each_sg(sglist, sg, nelems, i) {
F
FUJITA Tomonori 已提交
3293
		BUG_ON(!sg_page(sg));
3294
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
F
FUJITA Tomonori 已提交
3295
		sg->dma_length = sg->length;
3296 3297 3298 3299
	}
	return nelems;
}

3300
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3301
			enum dma_data_direction dir, struct dma_attrs *attrs)
3302 3303 3304
{
	int i;
	struct dmar_domain *domain;
3305 3306 3307 3308
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
F
FUJITA Tomonori 已提交
3309
	struct scatterlist *sg;
3310
	unsigned long start_vpfn;
3311
	struct intel_iommu *iommu;
3312 3313

	BUG_ON(dir == DMA_NONE);
3314 3315
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3316

3317
	domain = get_valid_domain_for_dev(dev);
3318 3319 3320
	if (!domain)
		return 0;

3321 3322
	iommu = domain_get_iommu(domain);

3323
	for_each_sg(sglist, sg, nelems, i)
3324
		size += aligned_nrpages(sg->offset, sg->length);
3325

3326 3327
	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
3328
	if (!iova) {
F
FUJITA Tomonori 已提交
3329
		sglist->dma_length = 0;
3330 3331 3332 3333 3334 3335 3336 3337
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3338
			!cap_zlr(iommu->cap))
3339 3340 3341 3342
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

3343
	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3344

3345
	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3346 3347 3348 3349 3350 3351 3352 3353 3354 3355
	if (unlikely(ret)) {
		/*  clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
3356 3357
	}

3358 3359
	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
3360
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3361
	else
3362
		iommu_flush_write_buffer(iommu);
3363

3364 3365 3366
	return nelems;
}

3367 3368 3369 3370 3371
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

3372
struct dma_map_ops intel_dma_ops = {
3373 3374
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
3375 3376
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
3377 3378
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
3379
	.mapping_error = intel_mapping_error,
3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,

					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);

}

3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that the this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

3492 3493 3494
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
3495
	struct device *dev;
3496
	int i;
3497 3498 3499

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
3500 3501 3502
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
3503
			/* ignore DMAR unit if no devices exist */
3504 3505 3506 3507 3508
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

3509 3510
	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
3511 3512
			continue;

3513 3514
		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
3515
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3516 3517 3518 3519
				break;
		if (i < drhd->devices_cnt)
			continue;

3520 3521 3522 3523 3524 3525
		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
3526 3527
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
3528
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3529 3530 3531 3532
		}
	}
}

3533 3534 3535 3536 3537 3538 3539 3540 3541 3542
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}
	
3554 3555 3556 3557 3558
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
3559
					   DMA_CCMD_GLOBAL_INVL);
3560
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3561
					 DMA_TLB_GLOBAL_FLUSH);
3562 3563
		if (iommu_enable_translation(iommu))
			return 1;
3564
		iommu_disable_protect_mem_regions(iommu);
3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
3577
					   DMA_CCMD_GLOBAL_INVL);
3578
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3579
					 DMA_TLB_GLOBAL_FLUSH);
3580 3581 3582
	}
}

3583
static int iommu_suspend(void)
3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

3601
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
3602 3603 3604 3605 3606 3607 3608 3609 3610 3611

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

3612
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3613 3614 3615 3616 3617 3618 3619 3620 3621 3622
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

3623
static void iommu_resume(void)
3624 3625 3626 3627 3628 3629
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
3630 3631 3632 3633
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3634
		return;
3635 3636 3637 3638
	}

	for_each_active_iommu(iommu, drhd) {

3639
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
3640 3641 3642 3643 3644 3645 3646 3647 3648 3649

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

3650
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3651 3652 3653 3654 3655 3656
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

3657
static struct syscore_ops iommu_syscore_ops = {
3658 3659 3660 3661
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

3662
static void __init init_iommu_pm_ops(void)
3663
{
3664
	register_syscore_ops(&iommu_syscore_ops);
3665 3666 3667
}

#else
3668
static inline void init_iommu_pm_ops(void) {}
3669 3670
#endif	/* CONFIG_PM */

3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
3685 3686 3687 3688 3689 3690 3691
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

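/*
 * Parse one ATSR (ATS Reporting) entry of the ACPI DMAR table and add it to
 * the global dmar_atsr_units list.
 */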
int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

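/*
 * Return 1 if the PCIe root port above @dev is listed in an ATSR unit on the
 * same PCI segment (or covered by an include-all ATSR), 0 otherwise.
 */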
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

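/*
 * PCI hot-plug notification handler: add the new device to, or remove the
 * departing device from, the device scopes of all matching RMRR and ATSR
 * units.
 */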
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to the unbinding of a device from its driver.
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens later, when the device is mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
	    action != BUS_NOTIFY_DEL_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
	    list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

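/*
 * Memory hotplug notifier: extend the static identity map (si_domain) when
 * memory goes online and tear the mapping down again when it goes offline.
 */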
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
					       iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo,
					iova->pfn_hi - iova->pfn_lo + 1,
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

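/*
 * sysfs attributes (version, address, cap, ecap) exported for each IOMMU
 * through the "intel-iommu" attribute group.
 */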
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

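/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * initialize all DMAR units and DMA remapping, then register the IOMMU API
 * ops, the bus notifier and the memory hotplug notifier.
 */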
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

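/*
 * Detach @dev from @domain: unlink its device_domain_info, tear down its
 * context entry and dev-IOTLB, and release the domain's reference to the
 * IOMMU once no other device on that IOMMU remains in the domain.
 */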
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* If there are no other devices under the same iommu owned by
		 * this domain, clear this iommu from iommu_bmp and update the
		 * iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

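/*
 * Initialize a dmar_domain used through the IOMMU API: reserve the special
 * IOVA ranges, compute the AGAW for @guest_width and allocate the top-level
 * page-table page.
 */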
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

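/*
 * IOMMU API callback: allocate and set up the dmar_domain backing a generic
 * iommu_domain.
 */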
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(true);
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}

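/*
 * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE flags into PTE
 * bits and install the mapping, growing the domain's max_addr as long as it
 * still fits within the domain's address width.
 */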
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

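/*
 * IOMMU API unmap callback: clear the mapping, flush the IOTLB of every
 * IOMMU this domain is attached to, and free the page-table pages that are
 * no longer needed.
 */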
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

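/* Callbacks registered for the PCI bus via bus_set_iommu() in intel_iommu_init() */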
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}
	
	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}