/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
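/*
 * Illustrative sketch (not part of the original file): how a driver hands a
 * streaming buffer over to the device and takes it back, which is where the
 * helpers below come into play.  The function and buffer names are
 * hypothetical and error handling is reduced to the minimum.
 */
#if 0
static int example_stream_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* the device owns the buffer here; start the transfer */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	/* the CPU owns the buffer again and may read or write it */
	return 0;
}
#endif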
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
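/*
 * Example: the default 256 KiB pool above can be overridden at boot, e.g.
 *
 *	coherent_pool=1M
 *
 * on the kernel command line (the 1M value is for illustration only).
 */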

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;
	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_DMA_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, PAGE_KERNEL);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}



static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_DMA_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}
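/*
 * Illustrative sketch (not part of the original file): a driver allocating a
 * small coherent descriptor ring through the generic API, which lands in
 * arm_dma_alloc() above.  Names are hypothetical.
 */
#if 0
static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* SZ_4K comes from <linux/sizes.h>, already included above */
	return dma_alloc_coherent(dev, SZ_4K, dma, GFP_KERNEL);
}
#endif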

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
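/*
 * Illustrative sketch (not part of the original file): the driver-level
 * calls that end up in __dma_page_cpu_to_dev() above and
 * __dma_page_dev_to_cpu() below when a buffer is reused for several
 * transfers.  Names are hypothetical.
 */
#if 0
static void example_rx_complete(struct device *dev, dma_addr_t buf, size_t len)
{
	dma_sync_single_for_cpu(dev, buf, len, DMA_FROM_DEVICE);
	/* ... the CPU may now inspect the received data ... */
	dma_sync_single_for_device(dev, buf, len, DMA_FROM_DEVICE);
}
#endif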

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
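/*
 * Illustrative sketch (not part of the original file): typical driver use of
 * the scatter-gather mapping above via the generic dma_map_sg() entry point.
 * Names are hypothetical.
 */
#if 0
static int example_map_sglist(struct device *dev, struct scatterlist *sgl,
			      int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	/* program one hardware descriptor per mapped segment */
	for_each_sg(sgl, s, count, i)
		dev_dbg(dev, "seg %d: len %u\n", i, sg_dma_len(s));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
#endif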

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;
	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
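/*
 * Illustrative sketch (not part of the original file): a driver for a device
 * that can only drive 24 address lines restricting its masks, which is what
 * ends up being checked through dma_supported() above.
 */
#if 0
static int example_restrict_dma_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
		return -EIO;
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(24));
}
#endif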

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;
	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!(gfp & __GFP_WAIT))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
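/*
 * Illustrative sketch (not part of the original file): giving a device a
 * 128 MiB IOMMU-backed DMA window.  The base address, size and bus type are
 * arbitrary example values; <linux/platform_device.h> would be needed for
 * platform_bus_type.
 */
#if 0
static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000,
					   SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);
	if (err)
		arm_iommu_release_mapping(mapping);
	return err;
}
#endif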

static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif