/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align)	((unsigned long)((val) & ((align) - 1)))
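
/*
 * Worked example (illustrative, not part of the original source): for
 * the power-of-two alignments used in this file, OFFSET() is simply a
 * modulo, so assuming IO_TLB_SHIFT == 11 (2KB slabs, per
 * <linux/swiotlb.h>):
 *
 *	OFFSET(0x1234, 1 << IO_TLB_SHIFT) == 0x1234 & 0x7ff == 0x234
 */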

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
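
/*
 * Illustrative arithmetic (an assumption, not taken from this file):
 * with 4KB pages (PAGE_SHIFT == 12) and IO_TLB_SHIFT == 11,
 * SLABS_PER_PAGE == 1 << (12 - 11) == 2 slabs per page, and
 * IO_TLB_MIN_SLABS == (1 << 20) >> 11 == 512 slabs, i.e. 1MB.
 */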

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
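
/*
 * Illustrative command lines (assuming 2KB slabs): "swiotlb=65536"
 * reserves 65536 slabs (128MB) for the bounce pool, and
 * "swiotlb=65536,force" additionally bounces every mapping, even for
 * devices that could have addressed the memory directly.
 */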

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
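
	/*
	 * Worked example (illustrative): with IO_TLB_SEGSIZE == 128 the
	 * loop above leaves the list as 128, 127, ..., 2, 1, 128, 127, ...
	 * so that io_tlb_list[i] counts the contiguous free slabs from i
	 * up to the next IO_TLB_SEGSIZE boundary.
	 */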
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");
	if (verbose)
		swiotlb_print_info();
}

void __init
swiotlb_init(int verbose)
{
	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
}
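
/*
 * Illustrative call site (hypothetical, not from this file): architecture
 * setup code typically calls this once during early boot, while bootmem is
 * still available and before the first DMA mapping is made, e.g.:
 *
 *	swiotlb_init(1);	(1 == print the bounce pool placement)
 */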

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}
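
/*
 * Illustrative usage (hypothetical): an architecture that skipped the
 * bootmem-based swiotlb_init() can still set up a 64MB pool once the
 * page allocator is up:
 *
 *	if (swiotlb_late_init_with_default_size(64 << 20))
 *		printk(KERN_WARNING "swiotlb: late init failed\n");
 */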

void __init swiotlb_free(void)
{
	if (!io_tlb_overflow_buffer)
		return;

	if (late_alloc) {
		free_pages((unsigned long)io_tlb_overflow_buffer,
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)io_tlb_start,
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(__pa(io_tlb_overflow_buffer),
				  io_tlb_overflow);
		free_bootmem_late(__pa(io_tlb_orig_addr),
				  io_tlb_nslabs * sizeof(phys_addr_t));
		free_bootmem_late(__pa(io_tlb_list),
				  io_tlb_nslabs * sizeof(int));
		free_bootmem_late(__pa(io_tlb_start),
				  io_tlb_nslabs << IO_TLB_SHIFT);
	}
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with the succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
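
/*
 * Worked example (illustrative): freeing nslots == 2 at index == 5 when
 * io_tlb_list[7] already holds 3 free slabs sets io_tlb_list[6] = 4 and
 * io_tlb_list[5] = 5; a non-zero preceding entry io_tlb_list[4] is then
 * bumped to 6, merging the free runs on both sides into one.
 */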

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (dev_addr + size - 1 > dma_mask) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
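
/*
 * Illustrative usage (hypothetical driver code): these coherent helpers
 * are normally reached through the generic DMA API rather than called
 * directly:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, cpu, handle);
 */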

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately, drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error() (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
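
/*
 * Illustrative usage (hypothetical driver code): this entry point is
 * normally reached through the streaming DMA API, and a mapping that
 * fell back to the overflow buffer is caught with dma_mapping_error():
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */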

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt() doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device(), and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
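
/*
 * Illustrative usage (hypothetical driver code; program_hw() is a
 * made-up helper): after a successful mapping, each element carries its
 * own bus address and length:
 *
 *	struct scatterlist *sg;
 *	int i, n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	for_each_sg(sgl, sg, n, i)
 *		program_hw(dev, sg_dma_address(sg), sg_dma_len(sg));
 */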

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);