/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align)	((unsigned long)((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
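/* With the usual 2KB slab (IO_TLB_SHIFT == 11) this floor works out to 512 slabs. */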

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
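
/*
 * Example of the encoding (assuming the usual IO_TLB_SEGSIZE of 128):
 * right after initialization each segment of io_tlb_list reads
 * 128, 127, ..., 1, so "io_tlb_list[index] >= nslots" is a constant-time
 * test for "at least nslots contiguous free slots start at index", which
 * is how map_single() searches below.
 */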

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static struct swiotlb_phys_addr {
	struct page *page;
	unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
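
/*
 * Example (illustrative values): booting with "swiotlb=65536" asks for
 * 65536 slabs, i.e. 128MB of bounce space with the usual 2KB slab
 * (IO_TLB_SHIFT == 11); "swiotlb=65536,force" additionally forces every
 * mapping through the bounce buffers, which is useful for exercising this
 * code on hardware that would not otherwise need it.
 */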

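/*
 * The hooks below are marked __weak so that an architecture or platform
 * with special requirements for DMA-reachable, physically contiguous
 * memory can override them; the defaults simply use the boot-mem and
 * page allocators and an identity physical/bus translation.
 */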
void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(address));
}

int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
	return 0;
}

static dma_addr_t swiotlb_sg_to_bus(struct device *hwdev, struct scatterlist *sg)
{
	return swiotlb_phys_to_bus(hwdev, page_to_phys(sg_page(sg)) + sg->offset);
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
	                           get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
	           sizeof(struct swiotlb_phys_addr)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static inline int range_needs_mapping(void *ptr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
	buffer.page += buffer.offset >> PAGE_SHIFT;
	buffer.offset &= PAGE_SIZE - 1;
	return buffer;
}

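/*
 * Copy @size bytes between the bounce slot at @dma_addr and the original
 * buffer described by @buffer.  A highmem page has no permanent kernel
 * mapping, so in that case the copy walks the buffer page by page through
 * a temporary kmap_atomic() mapping, with interrupts disabled around each
 * chunk to protect the per-cpu KM_BOUNCE_READ slot.
 */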
static void
__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
{
	if (PageHighMem(buffer.page)) {
		size_t len, bytes;
		char *dev, *host, *kmp;

		len = size;
		while (len != 0) {
			unsigned long flags;

			bytes = len;
			if ((bytes + buffer.offset) > PAGE_SIZE)
				bytes = PAGE_SIZE - buffer.offset;
			local_irq_save(flags); /* protects KM_BOUNCE_READ */
			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
			dev  = dma_addr + size - len;
			host = kmp + buffer.offset;
			if (dir == DMA_FROM_DEVICE)
				memcpy(host, dev, bytes);
			else
				memcpy(dev, host, bytes);
			kunmap_atomic(kmp, KM_BOUNCE_READ);
			local_irq_restore(flags);
			len -= bytes;
			buffer.page++;
			buffer.offset = 0;
		}
	} else {
		void *v = page_address(buffer.page) + buffer.offset;

		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, v, size);
		else
			memcpy(v, dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;
	struct swiotlb_phys_addr slot_buf;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	slot_buf = buffer;
	for (i = 0; i < nslots; i++) {
		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
		slot_buf.offset &= PAGE_SIZE - 1;
		io_tlb_orig_addr[index+i] = slot_buf;
		slot_buf.offset += 1 << IO_TLB_SHIFT;
	}
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		/*
		 * bounce... copy the data back into the original buffer
		 * and delete the bounce buffer.
		 */
		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_32BIT_MASK;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
		struct swiotlb_phys_addr buffer;
		buffer.page = virt_to_page(NULL);
		buffer.offset = 0;
		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
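
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * allocating a small descriptor ring that both the cpu and the device touch:
 *
 *	dma_addr_t ring_bus;
 *	void *ring = swiotlb_alloc_coherent(dev, ring_size, &ring_bus,
 *					    GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...program the device with ring_bus, access ring from the cpu...
 *	swiotlb_free_coherent(dev, ring_size, ring, ring_bus);
 */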

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
{
	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
	void *map;
	struct swiotlb_phys_addr buffer;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr, size) &&
	    !range_needs_mapping(ptr, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	buffer.page   = virt_to_page(ptr);
	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
	map = map_single(hwdev, buffer, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(hwdev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);

dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
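
/*
 * Usage sketch (hypothetical driver code): a typical streaming mapping
 * bounces through the aperture like this:
 *
 *	dma_addr_t bus_addr;
 *
 *	bus_addr = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(dev, bus_addr))
 *		return -ENOMEM;
 *	...start the transfer and wait for the device to finish...
 *	swiotlb_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
 */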

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
			   size_t size, int dir, struct dma_attrs *attrs)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);

void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
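
/*
 * Usage sketch (hypothetical): re-reading a DMA_FROM_DEVICE buffer between
 * transfers without unmapping it:
 *
 *	swiotlb_sync_single_for_cpu(dev, bus_addr, len, DMA_FROM_DEVICE);
 *	...examine the received data with the cpu...
 *	swiotlb_sync_single_for_device(dev, bus_addr, len, DMA_FROM_DEVICE);
 *	...let the device DMA into the buffer again...
 */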

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}

void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
			    struct dma_attrs *);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	struct swiotlb_phys_addr buffer;
	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		dev_addr = swiotlb_sg_to_bus(hwdev, sg);
		if (range_needs_mapping(sg_virt(sg), sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map;
			buffer.page   = sg_page(sg);
			buffer.offset = sg->offset;
			map = map_single(hwdev, buffer, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
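
/*
 * Usage sketch (hypothetical; sg_set_buf()/program_device() stand in for
 * real driver code): mapping a two-element scatterlist:
 *
 *	struct scatterlist sgl[2];
 *	int i, used;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	used = swiotlb_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	if (!used)
 *		return -ENOMEM;
 *	for (i = 0; i < used; i++)
 *		program_device(sgl[i].dma_address, sgl[i].dma_length);
 *	...
 *	swiotlb_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */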

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
	}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

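/*
 * A failed streaming mapping is reported by handing back the bus address
 * of the emergency overflow buffer (see swiotlb_full() above), so spotting
 * an error is a simple address comparison.
 */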
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);