/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

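/*
 * Non-coherent devices (and explicit write-combine requests) get a Normal
 * non-cacheable mapping: on arm64, pgprot_writecombine() selects the
 * Normal-NC memory attribute.
 */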
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
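/*
 * The pool size can thus be set on the kernel command line, e.g.
 * "coherent_pool=1M"; memparse() accepts the usual K/M/G size suffixes.
 */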

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

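/*
 * Drivers reach the two hooks above through the generic DMA API; a typical
 * (purely illustrative) sequence, assuming a platform device "pdev", would
 * be:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	... device DMAs to/from "dma" ...
 *
 *	dma_free_coherent(&pdev->dev, SZ_4K, cpu, dma);
 *
 * The streaming hooks below defer to SWIOTLB (which may bounce through an
 * intermediate buffer) and, for non-coherent devices, add the cache
 * maintenance on the kernel alias of the device-visible address.
 */
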
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
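
/*
 * Typically reached from a driver's ->mmap() handler via
 * dma_mmap_coherent(); illustratively, with "cpu"/"dma" naming a buffer
 * previously obtained from dma_alloc_coherent():
 *
 *	return dma_mmap_coherent(dev, vma, cpu, dma,
 *				 vma->vm_end - vma->vm_start);
 */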

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
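
/*
 * Like the coherent hooks, the streaming entries above sit behind the
 * generic helpers; a typical (illustrative) use, assuming "dev" and a
 * kmalloc'ed buffer "buf" of "len" bytes:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */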

static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
							pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

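		/*
		 * First-fit with order alignment: allocations come back
		 * naturally aligned to the order of their own size.
		 */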
		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
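
/*
 * dummy_dma_ops is the safe fallback for devices that must not (or cannot)
 * perform DMA, e.g. because no one ever called arch_setup_dma_ops() for
 * them: every operation fails cleanly instead of touching memory.
 */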

static int __init arm64_dma_init(void)
{
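	/*
	 * Bounce buffering can only be required when swiotlb is forced, or
	 * when RAM extends above arm64_dma_phys_limit and is therefore out
	 * of reach of devices with a narrow DMA mask.
	 */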
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

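/*
 * Seed the DMA debugging code (CONFIG_DMA_API_DEBUG) with preallocated
 * tracking entries; dma_debug_init() is a no-op stub otherwise.
 */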
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
			dma_direction_to_prot(dir, coherent));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			   u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
			dev_name(dev));
		return false;
	}

	dev->archdata.dma_ops = &iommu_dma_ops;
	return true;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			      u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}
	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (WARN_ON(domain))
		iommu_detach_device(domain, dev);

	dev->archdata.dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
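
/*
 * arch_setup_dma_ops() is invoked by bus/firmware code (e.g.
 * of_dma_configure() for devices described in the device tree), which
 * supplies the DMA window and coherency attribute parsed from firmware;
 * drivers never call into this file directly.
 */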