/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

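/*
 * Grab a buffer from the atomic pool set up by atomic_pool_init().  This is
 * the non-blocking path, used when the caller cannot sleep; the backing
 * page is returned through @ret_page.
 */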
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

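/*
 * Allocate physically contiguous memory for coherent DMA, constrained to
 * ZONE_DMA when the device's coherent mask requires it.  CMA is preferred
 * when available and the caller may block; otherwise fall back to
 * swiotlb_alloc_coherent().
 */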
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

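/* Release memory obtained from __dma_alloc_coherent(), either CMA or swiotlb. */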
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

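/*
 * The .alloc hook of swiotlb_dma_ops.  Coherent devices get the linear map
 * address back directly; non-coherent devices either take a pre-mapped
 * buffer from the atomic pool (when blocking is not allowed) or have the
 * kernel alias cleaned and a non-cacheable remapping created.
 */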
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

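/* The .free hook of swiotlb_dma_ops, undoing whatever __dma_alloc() did. */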
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

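/*
 * Map @size bytes of a DMA buffer starting at @pfn into userspace,
 * honouring the offset requested via vma->vm_pgoff.
 */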
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

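/* The .mmap hook: expose a coherent buffer to userspace with the right pgprot. */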
static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

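/* Describe a coherent buffer as a single-entry scatter-gather table. */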
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

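/*
 * With no bounce buffer in use all memory is assumed to be addressable by
 * the device, so any DMA mask is reported as supported.
 */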
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

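/* Default DMA operations, backed by swiotlb/CMA, used when no IOMMU DMA domain is set up. */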
static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

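/*
 * Carve out the pool used by __alloc_from_pool(): allocate pages from CMA
 * (or GFP_DMA), clean them in the linear map, remap them as Normal-NC and
 * hand the region to a gen_pool allocator.
 */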
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

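/*
 * Stub operations for devices that are not DMA capable: allocations and
 * mappings fail, everything else is a no-op.
 */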
const struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

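/*
 * Enable the swiotlb bounce buffer if it is forced on the command line or
 * if RAM extends above the ZONE_DMA limit, then create the atomic pool.
 */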
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

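/* DMA operations for devices attached to an IOMMU DMA domain. */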
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

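/*
 * Switch the device to iommu_dma_ops if the IOMMU core has attached it to a
 * DMA domain; otherwise the default (swiotlb) ops set up by
 * arch_setup_dma_ops() are retained.
 */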
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
918
				  const struct iommu_ops *iommu)
R

#endif  /* CONFIG_IOMMU_DMA */

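/*
 * Called once a device's DMA configuration is known (e.g. from DT or ACPI):
 * record coherency, install the swiotlb ops by default, then let the IOMMU
 * (and, in a Xen initial domain, the Xen layer) override them.
 */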
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}