// SPDX-License-Identifier: GPL-2.0
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
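/*
 * Example (not part of this file): a minimal sketch of how an IOMMU driver's
 * domain_alloc callback might acquire the cookie for DMA-API domains. The
 * my_domain type and helpers are hypothetical; real drivers embed
 * struct iommu_domain in their own domain structure.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *md;
 *
 *		if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *			return NULL;
 *
 *		md = kzalloc(sizeof(*md), GFP_KERNEL);
 *		if (!md)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 */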

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
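/*
 * Example (not part of this file): a sketch of how a caller that manages its
 * own UNMANAGED domain might still get MSI doorbells remapped.
 * MY_MSI_IOVA_BASE and MY_MSI_IOVA_SIZE are hypothetical values chosen to lie
 * outside the IOVA space the caller hands out itself.
 *
 *	domain = iommu_domain_alloc(bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (ret)
 *		goto err_free_domain;
 *	// ...attach devices, and never hand out IOVAs from
 *	// [MY_MSI_IOVA_BASE, MY_MSI_IOVA_BASE + MY_MSI_IOVA_SIZE)...
 */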

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
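/*
 * Example (not part of this file): the matching domain_free side of the
 * driver sketch above; my_domain/to_my_domain() are hypothetical.
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		struct my_domain *md = to_my_domain(domain);
 *
 *		iommu_put_dma_cookie(domain);
 *		// ...tear down page tables...
 *		kfree(md);
 *	}
 */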

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
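/*
 * Example (not part of this file): drivers typically chain this helper from
 * their own .get_resv_regions callback, adding any driver-specific regions on
 * top. The my_iommu_* names and MY_SW_MSI_* values below are hypothetical.
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *		region = iommu_alloc_resv_region(MY_SW_MSI_BASE, MY_SW_MSI_SIZE,
 *						 prot, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */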

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
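/*
 * For example (illustration only): a cache-coherent master doing a
 * DMA_TO_DEVICE transfer with no special attributes gets
 * IOMMU_READ | IOMMU_CACHE, while a non-coherent DMA_FROM_DEVICE mapping
 * with DMA_ATTR_PRIVILEGED gets IOMMU_WRITE | IOMMU_PRIV.
 */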

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If it the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page && !dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(alloc_size));
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page = NULL;
	void *cpu_addr;

	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
						 get_order(alloc_size),
						 gfp & __GFP_NOWARN);
	if (!page)
		page = alloc_pages(gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				VM_USERMAP, prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
		__free_pages(page, get_order(alloc_size));
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}
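/*
 * Example (not part of this file): arch code is expected to call
 * iommu_setup_dma_ops() once it knows the device's DMA window and whether an
 * IOMMU is translating for it, typically from its arch_setup_dma_ops()
 * implementation. A rough sketch, with the non-IOMMU fallback elided:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */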

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
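/*
 * Example (not part of this file): an MSI controller driver is expected to
 * call iommu_dma_prepare_msi() from its sleepable irq-domain alloc path and
 * iommu_dma_compose_msi_msg() when it writes the message. A rough sketch with
 * hypothetical my_msi_* names and doorbell lookup:
 *
 *	static int my_msi_domain_alloc(struct irq_domain *d, unsigned int virq,
 *				       unsigned int nr_irqs, void *args)
 *	{
 *		struct msi_desc *desc = ((msi_alloc_info_t *)args)->desc;
 *		int err = iommu_dma_prepare_msi(desc, my_msi_doorbell_phys());
 *
 *		if (err)
 *			return err;
 *		// ...allocate hwirqs and set chip data as usual...
 *		return 0;
 *	}
 *
 *	static void my_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(my_msi_doorbell_phys());
 *		msg->address_lo = lower_32_bits(my_msi_doorbell_phys());
 *		msg->data = my_hwirq(data);
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 *	}
 */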

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);