/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
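
/*
 * Worked example (illustrative): with the usual IO_TLB_SHIFT of 11 from
 * <linux/swiotlb.h>, one slab is 2KB, so IO_TLB_MIN_SLABS is
 * (1 << 20) >> 11 == 512 slabs, i.e. the contiguous 1MB floor
 * mentioned above.
 */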

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
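
/*
 * Illustrative sketch of the free-list encoding, assuming the usual
 * IO_TLB_SEGSIZE of 128: for a fully free pool the counters run down
 * toward each segment boundary,
 *
 *	io_tlb_list[0..127]   = 128, 127, ..., 2, 1
 *	io_tlb_list[128..255] = 128, 127, ..., 2, 1
 *
 * so io_tlb_list[i] is the number of contiguous free slots available
 * at index i without crossing a segment boundary.
 */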

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
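
/*
 * Illustrative boot-line examples, assuming the usual IO_TLB_SHIFT of
 * 11 (2KB slabs):
 *
 *	swiotlb=65536		reserve 65536 slabs (128MB) of bounce space
 *	swiotlb=65536,force	additionally bounce every DMA mapping, even
 *				for devices that could reach the memory
 *				directly
 */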

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (dev_addr + size > dma_mask) {
		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
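
/*
 * Usage sketch (illustrative only): drivers normally reach this through
 * the generic DMA API rather than calling it directly, e.g.
 *
 *	void *cpu_addr;
 *	dma_addr_t dma;
 *
 *	cpu_addr = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu_addr, dma);
 */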

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error() (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
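
/*
 * Usage sketch (illustrative only): a driver streaming a page to a
 * device, via the dma_map_page()/dma_unmap_page() wrappers that end up
 * here on swiotlb-backed configurations:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */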

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
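
/*
 * Sketch of the ownership hand-off these hooks implement (illustrative
 * only):
 *
 *	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... cpu may now safely read buf ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... device may write again ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */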

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
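
/*
 * Usage sketch (illustrative; program_hw_slot() is a made-up driver
 * helper): map a scatterlist and hand the resulting DMA addresses to
 * the hardware:
 *
 *	struct scatterlist *sg;
 *	int i, n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		program_hw_slot(i, sg_dma_address(sg), sg_dma_len(sg));
 */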

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
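
/*
 * Example check (illustrative): a device that can only drive 24 address
 * bits would be probed with
 *
 *	if (!swiotlb_dma_supported(dev, DMA_BIT_MASK(24)))
 *		... fall back or fail ...
 */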