/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)


/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct arm_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct arm_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

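/*
 * First-fit allocator for the consistent region: walk the address-ordered
 * vm_list under consistent_lock until a gap large enough for the request
 * is found, and link the new region in just before its successor.
 */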
static struct arm_vm_region *
arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vm_region *c, *new;

	new = kmalloc(sizeof(struct arm_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
{
	struct arm_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct arm_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#zx mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	if (arch_is_coherent()) {
		void *virt;

		virt = kmalloc(size, gfp);
		if (!virt)
			return NULL;
		*handle = virt_to_dma(dev, virt);

		return virt;
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
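
/*
 * Example (hypothetical driver code, not part of this file): a driver
 * typically keeps both the CPU pointer and the bus address, hands the
 * latter to the device, and frees with the same size/handle pair:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, ioaddr + RING_BASE_REG);
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 *
 * RING_BYTES, ioaddr and RING_BASE_REG are illustrative names only.
 */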

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct arm_vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
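
/*
 * Example (hypothetical, illustrative only): a frame-buffer style driver
 * would wire one of these into its mmap file operation, e.g.:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_writecombine(fd->dev, vma, fd->buf_cpu,
 *					     fd->buf_dma, fd->buf_size);
 *	}
 *
 * struct foo_dev and its fields are assumed names; the buffer itself must
 * have come from one of the allocators above so that arm_vm_region_find()
 * can locate it.
 */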

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct arm_vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	inner_op(start, start + size);
	outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);
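
/*
 * Instead of calling dma_cache_maint() directly, a driver would go through
 * the streaming API, e.g. (sketch):
 *
 *	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
 *	... let the device read the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *
 * which performs this cache maintenance on non-coherent platforms and the
 * dmabounce copying when CONFIG_DMABOUNCE is enabled.
 */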

static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
				       size_t size, int direction)
{
	void *vaddr;
	unsigned long paddr;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		inner_op(vaddr, vaddr + size);
	} else {
		vaddr = kmap_high_get(page);
		if (vaddr) {
			vaddr += offset;
			inner_op(vaddr, vaddr + size);
			kunmap_high(page);
		}
	}

	paddr = page_to_phys(page) + offset;
	outer_op(paddr, paddr + size);
}

void dma_cache_maint_page(struct page *page, unsigned long offset,
			  size_t size, int dir)
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		dma_cache_maint_contiguous(page, offset, len, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);
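
/*
 * Worked example (illustrative): a highmem sg entry with offset = 6000 and
 * size = 10000 against 4KiB pages is processed as three per-page calls:
 * (page + 1, offset 1904, len 2192), (page + 2, offset 0, len 4096) and
 * (page + 3, offset 0, len 3712), since kmap_high_get() can only map one
 * highmem page at a time.
 */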

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
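
/*
 * Example (hypothetical driver code): map a table built with the sg_*
 * helpers, program the hardware with the resulting bus addresses, then
 * unmap with the original nents:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		foo_write_desc(i, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * foo_write_desc() is an assumed device-specific helper.
 */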

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		if (!arch_is_coherent())
			dma_cache_maint_page(sg_page(s), s->offset,
					     s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);