/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
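
/*
 * For example: with IO_TLB_SHIFT == 11 each slab is 2 KB, so the 64 MB
 * default pool set up by swiotlb_init() below corresponds to
 * (64 << 20) >> IO_TLB_SHIFT == 32768 slabs.
 */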

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
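
/*
 * Example boot-time usage of the option parsed above (a sketch of the
 * accepted syntax):
 *
 *	swiotlb=65536		reserve 65536 slabs (128 MB at 2 KB each)
 *	swiotlb=force		bounce-buffer all DMA through the swiotlb
 *	swiotlb=65536,force	both of the above
 */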

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
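	/*
	 * Each entry counts the free slabs from that index up to the next
	 * IO_TLB_SEGSIZE boundary; e.g. with IO_TLB_SEGSIZE == 128 the
	 * initial pattern below is 128, 127, ..., 1, repeating.
	 */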
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
		                                        order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
	                           get_order(io_tlb_nslabs * sizeof(char *)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
	       "0x%lx\n", bytes >> 20,
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
	                                                      sizeof(char *)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
	dma_addr_t mask = 0xffffffff;
	/* If the device has a mask, use it, otherwise default to 32 bits */
	if (hwdev && hwdev->dma_mask)
		mask = *hwdev->dma_mask;
	return (addr & ~mask) != 0;
}
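
/*
 * For example: a device with a 30-bit DMA mask (*hwdev->dma_mask ==
 * 0x3fffffff) needs bounce buffering for any bus address at or above 1 GB,
 * since such an address has bits set outside the mask.
 */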

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = virt_to_bus(io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
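
	/*
	 * For example, with the common 4 GB segment boundary
	 * (mask == 0xffffffff) either branch works out to
	 * (1ULL << 32) >> IO_TLB_SHIFT slabs; the fallback only matters
	 * when mask + 1 overflows to 0 on 32-bit.
	 */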

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
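
	/*
	 * For example (assuming 4 KB pages and a page-aligned
	 * io_tlb_start): a 6 KB request occupies nslots = 3 slabs and is
	 * searched at a stride of 2 slabs, so the bounce buffer starts on
	 * a page boundary.
	 */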

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		/*
		 * bounce... copy the data back into the original buffer and
		 * delete the bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding
 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(buffer, dma_addr, size);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(dma_addr, buffer, size);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);

	ret = (void *)__get_free_pages(flags, order);
	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = virt_to_bus(ret);

	/* Confirm address can be DMA'd by device */
	if (address_needs_mapping(hwdev, dev_addr)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)*hwdev->dma_mask,
		       (unsigned long long)dev_addr);
		panic("swiotlb_alloc_coherent: allocated memory is out of "
		      "range for device");
	}
	*dma_handle = dev_addr;
	return ret;
}

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!(vaddr >= (void *)io_tlb_start
                    && vaddr < (void *)io_tlb_end))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
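
/*
 * Typical coherent-allocation usage (a sketch; "dev" and the size are
 * illustrative):
 *
 *	dma_addr_t bus;
 *	void *cpu = swiotlb_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	swiotlb_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */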

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough, return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single() or swiotlb_sync_single_for_cpu() is performed.
 */
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
{
	dma_addr_t dev_addr = virt_to_bus(ptr);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(hwdev, ptr, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);

dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
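
/*
 * Typical streaming usage (a sketch; "dev", "buf" and "len" are
 * illustrative driver-side names):
 *
 *	dma_addr_t handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... let the device read from "handle" ...
 *	swiotlb_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */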

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
			   size_t size, int dir, struct dma_attrs *attrs)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);

void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
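
/*
 * For example (a sketch): after the device has written the first 512 bytes
 * of a mapped buffer, a driver can make just that region visible to the cpu
 * with
 *
 *	swiotlb_sync_single_range_for_cpu(dev, handle, 0, 512,
 *					  DMA_FROM_DEVICE);
 */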

void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
			    struct dma_attrs *);
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements
 *       (for example via virtual mapping capabilities).
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	void *addr;
	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		dev_addr = virt_to_bus(addr);
		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
			void *map = map_single(hwdev, addr, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
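
/*
 * Scatter-gather usage (a sketch; "sgl" holds "nents" initialized entries):
 *
 *	int mapped = swiotlb_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device from sg_dma_address()/sg_dma_length() ...
 *	swiotlb_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */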

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return virt_to_bus(io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);