/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
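
/*
 * For concreteness (illustrative numbers only, assuming the usual
 * IO_TLB_SHIFT of 11, i.e. 2KB slabs, from <linux/swiotlb.h>, and 4KB
 * pages): SLABS_PER_PAGE is 2, IO_TLB_MIN_SLABS is 512, and the 64MB
 * default pool set up in swiotlb_init() below comes to 32768 slabs.
 */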

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
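
/*
 * Illustrative command lines accepted by the parser above (the sizes are
 * examples only):
 *
 *	swiotlb=32768		reserve 32768 slabs for the IO TLB
 *	swiotlb=32768,force	same size, and bounce even DMA-capable devices
 *	swiotlb=force		keep the default size, force bouncing
 */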

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
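	/*
	 * Worked example of the encoding above, assuming IO_TLB_SEGSIZE is
	 * 128: io_tlb_list now reads 128, 127, ..., 1, 128, 127, ..., so
	 * each entry holds the count of free slabs from that index up to
	 * the end of its segment.
	 */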
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}
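
/*
 * A minimal sketch of an early bring-up call site (the arch hook and the
 * predicate are hypothetical, for illustration only):
 *
 *	void __init hypothetical_platform_dma_init(void)
 *	{
 *		if (!hypothetical_hw_iommu_present())
 *			swiotlb_init();
 *	}
 */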

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
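	/*
	 * Worked example, assuming 2KB slabs (IO_TLB_SHIFT == 11) and 4KB
	 * pages: a 5000-byte request rounds up to nslots = 3, and because
	 * 5000 > PAGE_SIZE the search below advances with stride = 2, so
	 * candidate buffers start page-aligned within the aperture.
	 */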

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
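
/*
 * Worked example of the merge above, assuming IO_TLB_SEGSIZE is 128:
 * freeing slots 4-5 while slots 6-7 are already free (io_tlb_list[6] == 2)
 * and slots 0-3 are free as well leaves io_tlb_list[0..7] reading
 * 8, 7, 6, 5, 4, 3, 2, 1, i.e. one contiguous run visible from index 0.
 */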

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (dev_addr + size > dma_mask) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
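
/*
 * Illustrative driver-side pairing of the two routines above (the device
 * pointer and sizes are hypothetical):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = swiotlb_alloc_coherent(dev, PAGE_SIZE, &bus_addr,
 *					  GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	swiotlb_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */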

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_sync_single_for_cpu is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
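
/*
 * Illustrative streaming round trip with the two routines above (the
 * device, page and length are hypothetical):
 *
 *	dma_addr_t handle;
 *
 *	handle = swiotlb_map_page(dev, page, 0, len, DMA_TO_DEVICE, NULL);
 *	if (swiotlb_dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the hardware at "handle" and start the transfer ...
 *	swiotlb_unmap_page(dev, handle, len, DMA_TO_DEVICE, NULL);
 */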

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
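
/*
 * Illustrative use of the sync pair above for a long-lived
 * DMA_FROM_DEVICE mapping (the names are hypothetical):
 *
 *	swiotlb_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now inspect the received data ...
 *	swiotlb_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again for the next transfer ...
 */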

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
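
/*
 * Illustrative scatter-gather usage (sgl and nents are hypothetical; note
 * the return value must be checked, since swiotlb_map_sg() returns 0 on
 * failure instead of panicking):
 *
 *	if (!swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE))
 *		return -ENOMEM;
 *	... program the device from sg_dma_address()/sg_dma_length() ...
 *	swiotlb_unmap_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
 */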

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);