/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

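/*
 * Choose the page protection for a DMA buffer mapping: coherent devices can
 * keep the default (cacheable) attributes, everything else gets a Normal
 * non-cacheable (write-combine) mapping.
 */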
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

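/*
 * Small pool of pre-allocated, non-cacheable memory used to satisfy atomic
 * (non-blocking) allocations for non-coherent devices, since remapping is
 * not possible in atomic context.  The size defaults to 256 KiB and can be
 * overridden with the "coherent_pool=" kernel parameter.
 */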
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

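/*
 * Allocation entry point for the swiotlb DMA ops:
 *  - non-coherent devices in atomic context are served from the atomic pool
 *    above, which is already mapped non-cacheable;
 *  - otherwise the buffer comes from CMA or swiotlb via
 *    __dma_alloc_coherent(), and for non-coherent devices the linear-map
 *    alias is cleaned and a second, non-cacheable mapping is created with
 *    dma_common_contiguous_remap().
 */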
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

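/*
 * Streaming DMA ("map"/"unmap"/"sync") callbacks simply wrap the swiotlb
 * implementation, adding the cache maintenance that non-coherent devices
 * need around each transfer (unless DMA_ATTR_SKIP_CPU_SYNC is set).
 */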
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

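/*
 * Helpers for exporting a DMA buffer to user space (mmap) or as a
 * scatterlist (get_sgtable); also shared with the IOMMU path below for
 * DMA_ATTR_FORCE_CONTIGUOUS allocations.
 */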
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

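/*
 * The default arm64 dma_map_ops, installed by arch_setup_dma_ops() below.
 * For illustration only: once these ops are in place for a device, a driver
 * call such as
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 * ends up in __dma_alloc(), and dma_map_single()/dma_map_page() end up in
 * __swiotlb_map_page().
 */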
static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

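/*
 * Set up the atomic pool: carve atomic_pool_size bytes out of CMA (or
 * GFP_DMA pages when no CMA area is configured), remap them as Normal
 * non-cacheable, and hand the region to a gen_pool allocator.
 */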
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

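/*
 * "Dummy" DMA ops: every operation either fails or is a no-op.  They serve
 * as a safe fallback for devices that have not been set up for DMA (e.g.
 * when the bus code never called arch_setup_dma_ops()), so that stray
 * mapping attempts fail cleanly instead of corrupting memory.
 */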
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

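/*
 * Enable swiotlb bouncing only when it can actually be needed: either when
 * it is forced on the command line or when there is RAM above the
 * addressable DMA limit.  The atomic pool is set up here as well.
 */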
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


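/*
 * IOMMU-backed DMA ops: when CONFIG_IOMMU_DMA is enabled and a device ends
 * up in an IOMMU DMA domain, the ops below replace the swiotlb ones and
 * allocate/map through the generic dma-iommu layer (IOVA space) instead of
 * relying on physically contiguous, device-addressable memory.
 */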
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

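/*
 * Three allocation strategies, mirroring __dma_alloc():
 *  - atomic callers get physically contiguous memory (from the atomic pool
 *    for non-coherent devices, plain pages otherwise);
 *  - DMA_ATTR_FORCE_CONTIGUOUS allocations come from CMA and are remapped;
 *  - everything else uses iommu_dma_alloc(), which may return scattered
 *    pages that are made virtually contiguous by remapping.
 */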
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

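/*
 * Switch a device over to the IOMMU DMA ops if the IOMMU core gave it a
 * default DMA domain; otherwise leave the platform (swiotlb) ops in place.
 */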
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

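/*
 * Called when a device is configured for DMA (typically from the DT/ACPI
 * bus code): install the swiotlb ops by default, record coherency, and let
 * __iommu_setup_dma_ops() upgrade to the IOMMU ops where appropriate.  In a
 * Xen initial domain the ops are wrapped by xen_dma_ops instead.
 */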
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}