/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

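/* Number of IO TLB slabs per page; each slab is 1 << IO_TLB_SHIFT bytes. */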
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

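/*
 * Set via the "swiotlb=force" boot parameter to bounce every mapping
 * through the IO TLB, even for devices that could reach the memory directly.
 */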
int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
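/*
 * Usage: "swiotlb=<nslabs>[,force]", e.g. "swiotlb=65536" (65536 is only an
 * example; the count is rounded up to a multiple of IO_TLB_SEGSIZE).
 */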
/* make io_tlb_overflow tunable too? */

void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

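/*
 * The default physical<->bus address translations are the identity;
 * architectures where bus addresses differ from physical addresses can
 * override these __weak hooks.
 */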
dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(address));
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
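		/* Try this order; on failure, retry one order smaller. */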
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
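			/*
			 * Map and copy one page per iteration; IRQs stay
			 * disabled around kmap_atomic() because the
			 * KM_BOUNCE_READ slot is per-CPU.
			 */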
			sz = min(PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL:
	 * mask + 1 wraps to zero, in which case we fall back to the slot
	 * count that spans the entire address space.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_32BIT_MASK;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
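		/*
		 * phys == 0 records that no original buffer backs this
		 * mapping, so unmap_single() skips the copy-back.
		 */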
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error() (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	void *ptr = page_address(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) &&
	    !range_needs_mapping(virt_to_phys(ptr), size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
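		/*
		 * Hand back the overflow buffer; swiotlb_dma_mapping_error()
		 * recognizes its bus address as the error value.
		 */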
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (range_needs_mapping(paddr, sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
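				/*
				 * Zero the first dma_length so the caller can
				 * see that nothing was mapped.
				 */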
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
	}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
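	/* swiotlb_map_page() returns the overflow buffer's bus address on failure. */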
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
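	/* Bouncing only works if the device can address the entire IO TLB. */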
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);