/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
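
/*
 * Usage sketch (illustrative, assuming the usual 2KB IO TLB slab size,
 * i.e. IO_TLB_SHIFT == 11): booting with "swiotlb=32768" reserves
 * 32768 slabs (64MB) for the bounce pool, and "swiotlb=32768,force"
 * additionally routes all DMA mappings through the bounce buffers,
 * even for devices that could reach the memory directly.
 */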

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
					       dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
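
	/*
	 * Worked example (illustrative, assuming 4KB pages and the usual
	 * 2KB IO TLB slab size, i.e. IO_TLB_SHIFT == 11): a 6KB request
	 * rounds up to nslots = 3 and, being larger than a page, uses
	 * stride 2 so the bounce buffer starts page-aligned; a 256-byte
	 * request uses nslots = 1 and stride 1.
	 */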

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation
	 * properly unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
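
/*
 * Usage sketch (illustrative; "dev", "page" and "len" are hypothetical,
 * and drivers normally reach these entry points through the generic
 * dma_map_ops wrappers rather than calling them directly):
 *
 *	dma_addr_t handle;
 *
 *	handle = swiotlb_map_page(dev, page, 0, len, DMA_TO_DEVICE, NULL);
 *	if (swiotlb_dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... let the device DMA from "handle" ...
 *	swiotlb_unmap_page(dev, handle, len, DMA_TO_DEVICE, NULL);
 */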

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
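
/*
 * Usage sketch for the sync pair (illustrative; "dev", "handle" and
 * "len" are hypothetical): to inspect a still-mapped DMA_FROM_DEVICE
 * buffer from the CPU and then hand it back to the device:
 *
 *	swiotlb_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	swiotlb_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */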

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (swiotlb_force ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
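
/*
 * Usage sketch (illustrative; "hwdev", "sglist" and "nents" are
 * hypothetical, and most drivers reach this through dma_map_sg()):
 *
 *	int mapped = swiotlb_map_sg_attrs(hwdev, sglist, nents,
 *					  DMA_TO_DEVICE, NULL);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device using sg_dma_address()/sg_dma_length() ...
 *	swiotlb_unmap_sg_attrs(hwdev, sglist, mapped, DMA_TO_DEVICE, NULL);
 */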

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
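
/*
 * Illustrative check (hypothetical "hwdev"): a device that can only
 * drive 24 address bits during bus mastering would probe support with:
 *
 *	if (!swiotlb_dma_supported(hwdev, 0x00ffffff))
 *		... fail the probe or fall back to PIO ...
 */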