/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/ctype.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
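/* With the usual IO_TLB_SHIFT of 11 (2KB slabs), this works out to 512 slabs. */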

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
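/*
 * Usage sketch (values are illustrative, not defaults): booting with
 * "swiotlb=65536" reserves 65536 slabs (65536 << IO_TLB_SHIFT bytes);
 * "swiotlb=65536,force" additionally sets swiotlb_force so that every
 * mapping is bounced through the IO TLB.
 */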

void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

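/*
 * The __weak phys<->bus converters below default to the identity mapping;
 * platforms where bus addresses differ from CPU physical addresses can
 * override them.
 */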
dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
{
	return swiotlb_phys_to_bus(virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(address));
}

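/*
 * A nonzero return from this __weak hook forces bouncing of the given
 * range even when the device's DMA mask could reach it; the default
 * never forces a mapping.
 */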
int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
	                           get_order(io_tlb_nslabs * sizeof(char *)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
	       "0x%lx\n", bytes >> 20,
	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
	                                                      sizeof(char *)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static inline int range_needs_mapping(void *ptr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		/*
		 * bounce... copy the data back into the original buffer and
		 * delete the bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(buffer, dma_addr, size);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(dma_addr, buffer, size);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_32BIT_MASK;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
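
/*
 * Usage sketch (hypothetical driver code, not part of this file;
 * RING_BYTES is a made-up size):
 *
 *	dma_addr_t bus;
 *	void *ring = swiotlb_alloc_coherent(dev, RING_BYTES, &bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... CPU uses 'ring' while the device is programmed with 'bus' ...
 *	swiotlb_free_coherent(dev, RING_BYTES, ring, bus);
 */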

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly,
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
{
	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr, size) &&
	    !range_needs_mapping(ptr, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(hwdev, ptr, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);

dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
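
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 *
 *	dma_addr_t bus = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	swiotlb_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */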

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
			   size_t size, int dir, struct dma_attrs *attrs)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);

void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
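
/*
 * Usage sketch (hypothetical driver code): inspecting a DMA_FROM_DEVICE
 * mapping from the CPU without unmapping it:
 *
 *	swiotlb_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data through 'buf' ...
 *	swiotlb_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */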

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}

void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
			    struct dma_attrs *);
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	void *addr;
	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		dev_addr = swiotlb_virt_to_bus(addr);
		if (range_needs_mapping(sg_virt(sg), sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, addr, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
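
/*
 * Usage sketch (hypothetical driver code; program_hw() is a made-up
 * helper):
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = swiotlb_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, n, i)
 *		program_hw(dev, sg->dma_address, sg->dma_length);
 *	... run the transfer ...
 *	swiotlb_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */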

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);