// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
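
/*
 * Illustrative sketch, not part of the original file: how an IOMMU driver
 * might pair iommu_get_dma_cookie() with iommu_put_dma_cookie() from its
 * domain_alloc/domain_free callbacks. The example_* names are hypothetical;
 * real drivers embed the iommu_domain in their own per-domain structure.
 * Guarded out because it is illustration only.
 */
#if 0
struct example_domain {
	struct iommu_domain	domain;
	/* driver-private state would live here */
};

static struct iommu_domain *example_domain_alloc(unsigned type)
{
	struct example_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA-API domains want the IOVA allocator cookie attached up front */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	return &dom->domain;
}

static void example_domain_free(struct iommu_domain *domain)
{
	struct example_domain *dom =
		container_of(domain, struct example_domain, domain);

	/* Safe even if no cookie was ever attached */
	iommu_put_dma_cookie(domain);
	kfree(dom);
}
#endif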

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
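
/*
 * Illustrative sketch, not part of the original file: a caller that manages
 * its own IOVA space (a VFIO-style user, say) attaching an MSI cookie to an
 * unmanaged domain. EXAMPLE_MSI_IOVA_BASE is a hypothetical value; the
 * caller must keep a large-enough window starting there free for the
 * PAGE_SIZE doorbell mappings described above.
 */
#if 0
#define EXAMPLE_MSI_IOVA_BASE	0x8000000

static int example_enable_msi_remap(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	/* The region must already be reserved in the caller's own allocator */
	return iommu_get_msi_cookie(domain, EXAMPLE_MSI_IOVA_BASE);
}
#endif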

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
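
/*
 * Illustrative sketch, not part of the original file: an IOMMU driver's
 * .get_resv_regions callback adding its own software-managed MSI window and
 * then forwarding to the helper above. The example_* names and the
 * 0x8000000/1MB window are hypothetical, chosen to mirror what the Arm SMMU
 * drivers reserve.
 */
#if 0
static void example_get_resv_regions(struct device *dev,
				     struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(0x8000000, 0x100000, prot,
					 IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}
#endif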

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
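
/*
 * Worked example, not part of the original file: with a 4KB IOVA granule
 * (shift of 12), a 36KB request is 9 granules; 9 is below the rcache limit
 * (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1) == 32) and not a power of two, so
 * it is padded to 16 granules before allocation, as described above.
 */
#if 0
static void example_iova_rounding(void)
{
	unsigned long iova_len = 0x9000 >> 12;			/* 9 */

	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);	/* 16 */
}
#endif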

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
733 734 735 736
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
737 738 739 740
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;
745 746

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If it the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				VM_USERMAP, prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
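
/*
 * Illustrative sketch, not part of the original file: architecture code is
 * expected to call iommu_setup_dma_ops() once a device's IOMMU and DMA
 * range are known, roughly along the lines of arm64's arch_setup_dma_ops().
 * The body below is a simplified approximation, not a verbatim copy.
 */
#if 0
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}
#endif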

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
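
/*
 * Illustrative sketch, not part of the original file: how an MSI controller
 * driver consumes the two helpers above, in the style of the GICv3 ITS. The
 * example_* names and the doorbell argument are hypothetical; the key point
 * is that iommu_dma_prepare_msi() runs from sleepable irq-domain allocation
 * context, while iommu_dma_compose_msi_msg() later patches the message from
 * atomic context.
 */
#if 0
static int example_msi_domain_alloc(struct msi_desc *desc,
				    phys_addr_t doorbell)
{
	/* May allocate IOVA and IOMMU mappings, so must not be atomic */
	return iommu_dma_prepare_msi(desc, doorbell);
}

static void example_msi_compose_msg(struct irq_data *d, phys_addr_t doorbell,
				    struct msi_msg *msg)
{
	msg->address_lo = lower_32_bits(doorbell);
	msg->address_hi = upper_32_bits(doorbell);

	/* Rewrites the address with the mapped IOVA if a cookie is present */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}
#endif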

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);