/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
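	/*
	 * Non-coherent devices, and callers passing DMA_ATTR_WRITE_COMBINE,
	 * get a write-combining (Normal-NC) mapping; for coherent devices
	 * the cacheable protection bits are left unchanged.
	 */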
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
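	/*
	 * Carve a buffer out of the pre-mapped atomic pool. This path is
	 * used when the caller cannot sleep, so no page allocation or
	 * remapping may happen here; the backing page is returned via
	 * @ret_page so the caller can derive the DMA address.
	 */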
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
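	/*
	 * Non-blocking allocations for non-coherent devices are served from
	 * the atomic pool. Everything else goes through swiotlb_alloc();
	 * for non-coherent devices the kernel alias is then cleaned and the
	 * buffer remapped with non-cacheable attributes.
	 */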
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
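	/*
	 * Undo __dma_alloc(): buffers from the atomic pool are simply
	 * returned to it; otherwise any non-cacheable remapping is torn
	 * down and the underlying memory released via swiotlb_free().
	 */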
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
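	/*
	 * Map through swiotlb (possibly bounce buffering), then perform the
	 * CPU cache maintenance needed by non-coherent devices unless the
	 * caller asked to skip it with DMA_ATTR_SKIP_CPU_SYNC.
	 */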
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
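	/* Scatterlist equivalent of __swiotlb_map_page(). */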
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
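	/*
	 * Map a physically contiguous DMA buffer into userspace, after
	 * checking that the VMA (including its pgoff) fits within the
	 * buffer.
	 */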
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
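	/*
	 * When swiotlb is not in use (all memory sits below
	 * arm64_dma_phys_limit), report the mask as supported; otherwise
	 * defer to swiotlb's own check.
	 */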
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops arm64_swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

static int __init atomic_pool_init(void)
{
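	/*
	 * Set aside a Normal-NC remapped region (256 KiB by default,
	 * tunable with "coherent_pool=") backed by CMA or GFP_DMA32 pages
	 * and hand it to a gen_pool, so that atomic callers can get
	 * coherent memory without remapping at allocation time.
	 */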
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
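	/*
	 * Bounce buffering is only enabled when it may actually be needed:
	 * either swiotlb is forced on the command line, or RAM extends
	 * beyond arm64_dma_phys_limit. The atomic pool is set up either
	 * way.
	 */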
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
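	/*
	 * Three allocation strategies, chosen below: atomic callers get
	 * physically contiguous memory (atomic pool or plain pages),
	 * DMA_ATTR_FORCE_CONTIGUOUS allocations come from CMA, and the
	 * default path builds a scatter-gather buffer with iommu_dma_alloc()
	 * and remaps it into a contiguous kernel VA range.
	 */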
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)){
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
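	/*
	 * Create the IOVA mapping first; CPU cache maintenance for the
	 * device is only done if the mapping succeeded and the caller did
	 * not pass DMA_ATTR_SKIP_CPU_SYNC.
	 */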
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
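	/*
	 * Cache maintenance is done on the CPU addresses up front (unless
	 * skipped), then the whole scatterlist is mapped in one go.
	 */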
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
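	/*
	 * Only devices that ended up in a default DMA domain get the IOMMU
	 * DMA ops; everything else keeps the ops installed by
	 * arch_setup_dma_ops().
	 */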
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
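	/*
	 * Default to the swiotlb ops, record the device's coherency, then
	 * let the IOMMU code (and, in a Xen initial domain, the Xen layer)
	 * override them where appropriate.
	 */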
	if (!dev->dma_ops)
		dev->dma_ops = &arm64_swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}