/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

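/*
 * OFFSET(val, align) is val's offset within an align-sized chunk (for the
 * power-of-two alignments used here, simply val % align), e.g. a slot
 * index's position within its IO_TLB_SEGSIZE segment.
 */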
#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of IO_TLB_SEGSIZE) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
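 *
 * With every slot free, entry i holds IO_TLB_SEGSIZE - OFFSET(i,
 * IO_TLB_SEGSIZE), i.e. the length of the free run from i to the end of
 * its segment; the initialization loops below store exactly this.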
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
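/*
 * Illustrative boot-line usage (the numbers are only examples):
 * "swiotlb=65536" reserves 65536 slabs (65536 << IO_TLB_SHIFT bytes, i.e.
 * 128MB with 2KB slabs), and "swiotlb=65536,force" additionally bounces
 * all DMA, even for devices that could reach the memory directly.
 */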

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
}

int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
					       dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

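	/*
	 * Retry at progressively smaller allocation orders until we succeed
	 * or would drop below IO_TLB_MIN_SLABS.
	 */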
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}

static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

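			/*
			 * IRQs are kept off across the copy: KM_BOUNCE_READ
			 * is a per-CPU kmap slot and must not be reused by
			 * an interrupt arriving mid-copy.
			 */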
			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
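 *
 * This performs a first-fit search of io_tlb_list: starting at
 * io_tlb_index it scans in steps of "stride" (one slab, or a page's worth
 * of slabs for requests larger than a page) until it finds a slot
 * advertising at least nslots contiguous free slabs, wrapping around at
 * most once before giving up.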
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
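 	 * When mask == ~0UL, mask + 1 wraps to 0 and the conditional below
 	 * picks the fallback: one slot per 1 << IO_TLB_SHIFT bytes of the
 	 * entire address space.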
 	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
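	 *
	 * Worked example within one segment: freeing slots 4-5 while slots
	 * 6,7 already hold counts 2,1 writes 3 into slot 5 and 4 into slot
	 * 4, so a later search starting at slot 4 sees one 4-slot free run.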
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with the succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

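	/*
	 * Preserve the offset into the slab so that a partial sync copies
	 * the matching bytes of the original buffer.
	 */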
	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
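 *
 * Illustrative driver-side sketch (the local names are hypothetical; the
 * calls are the ones defined in this file):
 *
 *	dma_addr_t bus = swiotlb_map_page(dev, page, offset, len,
 *					  DMA_TO_DEVICE, NULL);
 *	if (swiotlb_dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... hand "bus" to the device; when the transfer is done:
 *	swiotlb_unmap_page(dev, bus, len, DMA_TO_DEVICE, NULL);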
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) &&
	    !range_needs_mapping(phys, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, int dir)
{
	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(dma_addr)) {
		do_unmap_single(hwdev, dma_addr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(dma_addr, size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
L
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(dma_addr)) {
		sync_single(hwdev, dma_addr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
L
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
B
L
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (range_needs_mapping(paddr, sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
L
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

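/*
 * A mapping failed iff swiotlb_map_page() had to fall back to the
 * overflow buffer, so comparing against the overflow buffer's bus
 * address identifies the error case.
 */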
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);