/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
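
/*
 * Illustrative sketch only (not part of this file): an IOMMU driver would
 * typically acquire the cookie from its domain_alloc callback. The "foo_"
 * names below are hypothetical.
 *
 *	static struct iommu_domain *foo_domain_alloc(unsigned int type)
 *	{
 *		struct foo_domain *fd;
 *
 *		if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
 *			return NULL;
 *		fd = kzalloc(sizeof(*fd), GFP_KERNEL);
 *		if (!fd)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&fd->domain)) {
 *			kfree(fd);
 *			return NULL;
 *		}
 *		return &fd->domain;
 *	}
 */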

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;
	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;
	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
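
/*
 * Continuing the illustrative sketch above, the matching release would
 * normally sit in the driver's domain_free callback (names hypothetical):
 *
 *	static void foo_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_foo_domain(domain));
 *	}
 */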

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
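
/*
 * Illustrative sketch: a driver's .get_resv_regions callback can chain this
 * helper with its own hardware-specific regions. The "foo_" names and the
 * doorbell address below are hypothetical.
 *
 *	static void foo_get_resv_regions(struct device *dev,
 *					 struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x8000000, SZ_1M,
 *						 IOMMU_READ | IOMMU_WRITE,
 *						 IOMMU_RESV_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */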

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}
	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
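
/*
 * For example, DMA_TO_DEVICE on a cache-coherent master yields
 * IOMMU_READ | IOMMU_CACHE, while DMA_FROM_DEVICE with DMA_ATTR_PRIVILEGED
 * on a non-coherent master yields IOMMU_WRITE | IOMMU_PRIV.
 */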

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
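	 * For example, with a 4K granule and the usual rcache limit
	 * (IOVA_RANGE_CACHE_MAX_SIZE == 6), a 17-page request is rounded up
	 * to 32 pages, while a 32-page or larger request is left as-is.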
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	__iommu_dma_unmap(dev, handle, size);

	/* Non-coherent atomic allocation? Easy */
	if (dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If it the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page && !dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(alloc_size));
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	struct page *page = NULL;
	void *addr;

	size = PAGE_ALIGN(size);
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);

	if (!gfpflags_allow_blocking(gfp) && !coherent) {
		addr = dma_alloc_from_pool(size, &page, gfp);
		if (!addr)
			return NULL;

		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
					  ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_free_from_pool(addr, size);
			return NULL;
		}
		return addr;
	}

	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size),
						 gfp & __GFP_NOWARN);
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
	if (*handle == DMA_MAPPING_ERROR)
		goto out_free_pages;

	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
				__builtin_return_address(0));
		if (!addr)
			goto out_unmap;

		if (!coherent)
			arch_dma_prep_coherent(page, iosize);
	} else {
		addr = page_address(page);
	}
	memset(addr, 0, size);
	return addr;
out_unmap:
	__iommu_dma_unmap(dev, *handle, iosize);
out_free_pages:
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
	return NULL;
}

static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct page **pages;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	pages = __iommu_dma_get_pages(cpu_addr);
	if (!pages)
		return -ENXIO;
	return __iommu_dma_mmap(pages, size, vma);
}

static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
		size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	pages = __iommu_dma_get_pages(cpu_addr);
	if (!pages)
		return -ENXIO;
	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}
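
/*
 * This is expected to be wired up from the architecture's DMA setup path;
 * e.g. arm64's arch_setup_dma_ops() ends up calling it roughly like this
 * (sketch only):
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */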

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
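
/*
 * Illustrative flow (names hypothetical): an MSI controller driver calls
 * iommu_dma_prepare_msi() from its sleepable msi_prepare path, then fills in
 * the message from its irq_compose_msi_msg callback:
 *
 *	static int foo_msi_prepare(struct irq_domain *d, struct device *dev,
 *				   int nvec, msi_alloc_info_t *info)
 *	{
 *		return iommu_dma_prepare_msi(info->desc, FOO_DOORBELL_PHYS);
 *	}
 *
 *	static void foo_compose_msi_msg(struct irq_data *data,
 *					struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(FOO_DOORBELL_PHYS);
 *		msg->address_hi = upper_32_bits(FOO_DOORBELL_PHYS);
 *		msg->data = data->hwirq;
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 *	}
 */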

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);