/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __read_mostly;

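/*
 * Pick the pgprot for mapping a DMA buffer: non-coherent devices (or an
 * explicit DMA_ATTR_WRITE_COMBINE request) get a write-combine/Normal-NC
 * attribute, fully coherent devices keep the cacheable protection passed in.
 */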
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

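/*
 * Grab a buffer from the pre-remapped atomic pool. This is the only option
 * when the caller cannot sleep, since creating a fresh non-cacheable
 * mapping is not possible in atomic context.
 */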
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

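/*
 * Allocate the backing memory for a coherent buffer: use the device's CMA
 * area when one exists and the caller may block, otherwise fall back to
 * swiotlb_alloc_coherent(). The returned kernel address is the cacheable
 * linear-map alias; any remapping is left to the callers.
 */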
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

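/*
 * .alloc hook: for non-coherent devices in atomic context, carve the buffer
 * out of the atomic pool; otherwise allocate normally and, for non-coherent
 * devices, flush the linear alias and return a fresh non-cacheable
 * remapping of the buffer.
 */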
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

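/*
 * .free hook: undo __dma_alloc() - give pool memory back to the atomic
 * pool, or unmap the non-cacheable alias, then release the underlying
 * pages.
 */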
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

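/*
 * Streaming DMA: map through swiotlb (possibly bouncing), then perform the
 * cache maintenance a non-coherent device needs to observe the CPU's data.
 */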
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

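/*
 * mmap() a coherent buffer into userspace with the page protection matching
 * the device's coherency (write-combine for non-coherent devices).
 */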
static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
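
/*
 * These ops are not called directly; a driver goes through the generic DMA
 * API, which dispatches to the hooks above via the device's dma_map_ops.
 * Illustrative driver-side usage (not taken from this file):
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */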

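/*
 * Set up the atomic DMA pool at boot: allocate a physically contiguous
 * chunk (CMA if available, otherwise GFP_DMA pages), zero and flush it,
 * remap it Normal-NC and hand it to a gen_pool for __alloc_from_pool().
 */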
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
							pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

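/*
 * swiotlb bouncing is needed when forced on the command line or when memory
 * extends above arm64_dma_phys_limit; either way, set up the atomic pool.
 */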
static int __init arm64_dma_init(void)
{
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_range(virt, virt + PAGE_SIZE);
}

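/*
 * IOMMU-backed .alloc: callers that may block get a (possibly scattered)
 * set of pages from iommu_dma_alloc(), remapped into a contiguous vmalloc
 * region; atomic callers get one physically contiguous buffer (from the
 * atomic pool if non-coherent) mapped through the IOMMU in a single go.
 */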
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

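/*
 * Cache maintenance for streaming DMA on non-coherent devices: walk the
 * IOMMU page tables to turn the IOVA back into a physical address and
 * operate on the kernel linear alias of that memory.
 */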
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
			dma_direction_to_prot(dir, coherent));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			   u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
			dev_name(dev));
		return false;
	}

	dev->archdata.dma_ops = &iommu_dma_ops;
	return true;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			      u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}
	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (WARN_ON(domain))
		iommu_detach_device(domain, dev);

	dev->archdata.dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

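/*
 * Called by the bus/firmware code once a device's DMA configuration is
 * known: default to the swiotlb ops, record coherency, and switch to the
 * IOMMU ops if an IOMMU translates this device.
 */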
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}