/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

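/* encode the command type into bits 28-31 of the second command dword */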
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

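/* upper bound for the polling loop in iommu_completion_wait() */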
#define EXIT_LOOP_COUNT 10000000

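/* protects updates to the device table and the per-device domain table */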
static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

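/*
 * Decode one entry from the hardware event log and print it in a
 * human readable form.
 */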
static void iommu_print_event(void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD IOMMU: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

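/*
 * Drain the event log of an IOMMU: print every entry between the head
 * and tail pointers and write the new head pointer back to the
 * hardware.
 */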
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

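/*
 * Interrupt handler for the IOMMU: drains the event logs of all IOMMUs
 * in the system.
 */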
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0, ready = 0;
	unsigned status = 0;
	struct iommu_cmd cmd;
	unsigned long flags, i = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
		goto out;

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");

out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = lower_32_bits(address);
	cmd.data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int s = 0;
	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

	return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long limit;
	unsigned long address;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;
	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
				       dma_mask >> PAGE_SHIFT);

	if (dom->next_bit >= limit) {
		dom->next_bit = 0;
		dom->need_flush = true;
	}

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
				   0, boundary_size, align_mask);
	if (address == -1) {
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
					   0, boundary_size, align_mask);
		dom->need_flush = true;
	}

	if (likely(address != -1)) {
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);

	if (address + pages >= dom->next_bit)
		dom->need_flush = true;
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

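/*
 * Allocate a protection domain id from the global bitmap. Id 0 is
 * reserved as the error value.
 */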
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	iommu_area_reserve(dom->bitmap, start_page, pages);
}

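/*
 * Free the three level page table of a dma_ops domain: walks the two
 * upper levels, frees all page table pages found there and finally
 * frees the page table root.
 */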
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);

	kfree(dom->bitmap);

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
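	/* the allocation bitmap holds one bit per page in the aperture */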
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	/* Initialize the exclusion range if necessary */
	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = iommu_num_pages(iommu->exclusion_start,
					    iommu->exclusion_length,
					    PAGE_SIZE);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	/*
	 * At the last step, build the page tables so we don't need to
	 * allocate page table pages in the dma_ops mapping/unmapping
	 * path.
	 */
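	/* one page of ptes maps 512 * PAGE_SIZE = 2MB of the aperture */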
	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
			GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to a domain and makes the device visible to the hardware.
 */
static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;

	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[2] = domain->id;

	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			list_del(&ret->list);
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	*iommu = NULL;
	*domain = NULL;
	*bdf = 0xffff;

	if (dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* device not translated by any IOMMU in the system? */
	if (_bdf > amd_iommu_last_bdf)
		return 0;

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
				"device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

	return 1;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided by this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(iommu, dma_dom->domain.id);
		dma_dom->need_flush = false;
	} else if (unlikely(iommu_has_npcache(iommu)))
		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
	return address;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
		dma_dom->need_flush = false;
	}
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;

	if (!check_device(dev))
		return bad_dma_address;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == bad_dma_address)
		goto out;

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		free_pages((unsigned long)virt_addr, get_order(size));
		virt_addr = NULL;
		goto out;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}

/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer when a driver grabs a
 * device, we won't need to preallocate the protection domains anymore.
 * For now we have to.
 */
void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = (dev->bus->number << 8) | dev->devfn;
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu, order);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}

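/*
 * The entry points handed to the generic DMA layer.
 */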
static struct dma_mapping_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_single = map_single,
	.unmap_single = unmap_single,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};

/*
 * The function which glues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	return 0;

free_domains:

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}