/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
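
/*
 * Worked example of the slab arithmetic (assuming the usual values of
 * IO_TLB_SHIFT = 11, i.e. 2 KB slabs, and IO_TLB_SEGSIZE = 128 from
 * <linux/swiotlb.h>, with 4 KB pages):
 *
 *	SLABS_PER_PAGE   = 1 << (12 - 11)   = 2 slabs per page
 *	IO_TLB_MIN_SLABS = (1 << 20) >> 11  = 512 slabs (the 1 MB minimum)
 *	64 MB default    = (64 << 20) >> 11 = 32768 slabs
 */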

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
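
/*
 * Example of the command line syntax parsed above (illustrative): to
 * reserve 32768 slabs (64 MB with 2 KB slabs) and bounce every mapping
 * even for DMA-capable devices, boot with:
 *
 *	swiotlb=32768,force
 */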

void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(address));
}

int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
					      dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}
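
/*
 * With the 64 MB default above, swiotlb_print_info() produces boot
 * messages of the following shape (addresses are illustrative only):
 *
 *	Placing 64MB software IO TLB between ffff880020000000 - ffff880024000000
 *	software IO TLB at phys 0x20000000 - 0x24000000
 */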

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}
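
/*
 * Illustrative sketch (not part of this file) of how architecture code
 * might use the late allocator when the boot-time reservation was
 * skipped; swiotlb_arch_setup() is a hypothetical caller:
 *
 *	static int __init swiotlb_arch_setup(void)
 *	{
 *		if (!io_tlb_nslabs &&
 *		    swiotlb_late_init_with_default_size(64 << 20))
 *			panic("Cannot allocate SWIOTLB buffer");
 *		return 0;
 *	}
 */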

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}

static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}
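
/*
 * Note on the convention above: "dma_addr" is always the bounce slot in
 * the swiotlb pool and "phys" the original buffer, so DMA_TO_DEVICE
 * copies original -> bounce slot (before the device reads it) and every
 * other direction copies bounce slot -> original (after the device has
 * written).
 */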

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
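
/*
 * Illustrative driver-side sketch (not from this file; "dev" and the
 * 4096-byte size are hypothetical).  Drivers normally reach these two
 * routines through the generic DMA API:
 *
 *	dma_addr_t handle;
 *	void *ring = swiotlb_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	swiotlb_free_coherent(dev, 4096, ring, handle);
 */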

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) &&
	    !range_needs_mapping(phys, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(dma_addr)) {
		do_unmap_single(hwdev, dma_addr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(dma_addr, size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
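
/*
 * Illustrative streaming-mode round trip (a sketch; "dev", "page" and
 * "len" are hypothetical).  Note the mapping-error check against the
 * overflow buffer, implemented by swiotlb_dma_mapping_error() below:
 *
 *	dma_addr_t busaddr = swiotlb_map_page(dev, page, 0, len,
 *					      DMA_FROM_DEVICE, NULL);
 *
 *	if (swiotlb_dma_mapping_error(dev, busaddr))
 *		return -EIO;
 *	... device DMAs into the buffer ...
 *	swiotlb_unmap_page(dev, busaddr, len, DMA_FROM_DEVICE, NULL);
 */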

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
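
/*
 * Example of the ownership rules above (sketch; "dev", "busaddr" and
 * "len" are hypothetical):
 *
 *	swiotlb_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU may now read the received data ...
 *	swiotlb_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... device owns the buffer again ...
 */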

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (range_needs_mapping(paddr, sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
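
/*
 * Illustrative scatter-gather usage (sketch; assumes "sgl" was prepared
 * with sg_init_table()/sg_set_page() and holds "nents" entries):
 *
 *	int mapped = swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device using sg_dma_address()/sg_dma_length() ...
 *	swiotlb_unmap_sg(hwdev, sgl, mapped, DMA_TO_DEVICE);
 */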

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);