/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))
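
/*
 * Illustrative note (not in the original): for a power-of-two align,
 * OFFSET() is simply val modulo align, e.g. OFFSET(130, IO_TLB_SEGSIZE)
 * is 2 when IO_TLB_SEGSIZE is 128.
 */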

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
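/* Illustrative: with the usual 2 KB slab (IO_TLB_SHIFT == 11) this is 512 slabs. */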

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
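
/*
 * Illustrative example (not from the original source): booting with
 * "swiotlb=65536,force" requests 65536 slabs -- 128 MB with the usual
 * 2 KB slab size (1 << IO_TLB_SHIFT) -- and forces bounce buffering
 * even for devices that could reach the memory directly.
 */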

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");
	if (verbose)
		swiotlb_print_info();
}

void __init
swiotlb_init(int verbose)
{
	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_overflow_buffer)
		return;

	if (late_alloc) {
		free_pages((unsigned long)io_tlb_overflow_buffer,
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)io_tlb_start,
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(__pa(io_tlb_overflow_buffer),
				  io_tlb_overflow);
		free_bootmem_late(__pa(io_tlb_orig_addr),
				  io_tlb_nslabs * sizeof(phys_addr_t));
		free_bootmem_late(__pa(io_tlb_list),
				  io_tlb_nslabs * sizeof(int));
		free_bootmem_late(__pa(io_tlb_start),
				  io_tlb_nslabs << IO_TLB_SHIFT);
	}
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}
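
/*
 * Worked example (illustrative, not in the original): with
 * IO_TLB_SEGSIZE == 128, a fresh segment has io_tlb_list[] = 128, 127,
 * ..., 1.  Allocating nslots = 4 at index 0 zeroes entries 0..3, so a
 * later request still finds io_tlb_list[4] == 124 contiguous free slots
 * within the same segment.
 */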

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
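
/*
 * Illustrative example of the merge above (not in the original): freeing
 * 4 slots at index 4 when entries 8..127 of the segment are already free
 * (io_tlb_list[8] == 120) writes 121, 122, 123, 124 into entries 7..4,
 * restoring one contiguous free run of 124 slots.
 */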

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (dev_addr + size > dma_mask) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
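
/*
 * Typical use (illustrative; drivers normally reach this through the
 * generic DMA API rather than calling the swiotlb export directly):
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... use the buffer ...
 *	dma_free_coherent(dev, 4096, cpu, handle);
 */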

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
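
/*
 * Typical streaming use (illustrative; drivers reach this path through
 * dma_map_page()/dma_unmap_page() and must check for mapping errors):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... run the DMA ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */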

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
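
/*
 * Illustrative CPU-access pattern for a long-lived streaming mapping
 * (not from the original source):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */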

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
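
/*
 * Illustrative scatter-gather use through the DMA API (program_hw is a
 * hypothetical driver helper, not a real function):
 *
 *	int n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, n, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */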

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
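
/*
 * Illustrative driver-side check (not from the original source): a device
 * limited to 24-bit addressing would negotiate its mask with
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */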