/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

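/*
 * Worked example (illustrative, not part of the original file): with
 * IO_TLB_SHIFT defined as 11 in <linux/swiotlb.h>, one slab covers
 * 2 KB, so IO_TLB_MIN_SLABS is (1 << 20) >> 11 = 512 slabs, and on a
 * 4 KB-page system SLABS_PER_PAGE is 1 << (12 - 11) = 2.
 */
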
/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

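/*
 * Illustrative note (derived from the allocator below, not part of the
 * original file): io_tlb_list[i] holds the number of contiguous free
 * slots starting at index i, counted down toward the next
 * IO_TLB_SEGSIZE boundary.  With IO_TLB_SEGSIZE == 128, a fully free
 * segment reads 128, 127, ..., 2, 1; a slot that is in use reads 0.
 * io_tlb_index remembers where the previous search stopped.
 */
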
/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

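/*
 * Example usage (illustrative): the two comma-separated fields parsed
 * above correspond to kernel command lines such as
 *
 *	swiotlb=65536		reserve 65536 slabs (128 MB at 2 KB per slab)
 *	swiotlb=65536,force	additionally bounce every DMA mapping
 */
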
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	phys_addr_t pstart, pend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
	       (unsigned long long)pstart,
	       (unsigned long long)pend);
}

void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = tlb;
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");
	if (verbose)
		swiotlb_print_info();
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");

	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
}

void __init
swiotlb_init(int verbose)
{
	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

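/*
 * Minimal usage sketch (illustrative, not part of the original file): a
 * platform that skipped the early bootmem reservation could bring the
 * bounce pool up from the page allocator instead.  The function name is
 * hypothetical.
 */
static int __init example_late_swiotlb_setup(void)
{
	/* Request the same 64 MB default that swiotlb_init() uses. */
	if (swiotlb_late_init_with_default_size(64 * (1 << 20)))
		panic("example: cannot allocate software IO TLB");
	return 0;
}
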
void __init swiotlb_free(void)
{
	if (!io_tlb_overflow_buffer)
		return;

	if (late_alloc) {
		free_pages((unsigned long)io_tlb_overflow_buffer,
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)io_tlb_start,
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(__pa(io_tlb_overflow_buffer),
				  io_tlb_overflow);
		free_bootmem_late(__pa(io_tlb_orig_addr),
				  io_tlb_nslabs * sizeof(phys_addr_t));
		free_bootmem_late(__pa(io_tlb_list),
				  io_tlb_nslabs * sizeof(int));
		free_bootmem_late(__pa(io_tlb_start),
				  io_tlb_nslabs << IO_TLB_SHIFT);
	}
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
		paddr < virt_to_phys(io_tlb_end);
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(phys);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = phys & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn),
					     KM_BOUNCE_READ);
			if (dir == DMA_TO_DEVICE)
				memcpy(dma_addr, buffer + offset, sz);
			else
				memcpy(buffer + offset, dma_addr, sz);
			kunmap_atomic(buffer, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			dma_addr += sz;
			offset = 0;
		}
	} else {
		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, phys_to_virt(phys), size);
		else
			memcpy(phys_to_virt(phys), dma_addr, size);
	}
}

void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
			     phys_addr_t phys, size_t size,
			     enum dma_data_direction dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

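/*
 * Worked example (illustrative, not part of the original file): for a
 * 3 KB mapping with IO_TLB_SHIFT == 11, nslots = ALIGN(3072, 2048) >> 11
 * = 2 and the search stride is 1.  For an 8 KB mapping on a 4 KB-page
 * system the stride becomes 1 << (12 - 11) = 2, keeping the bounce
 * buffer page aligned, and iommu_is_span_boundary() rejects candidate
 * slots that would straddle the device's dma_get_seg_boundary() mask.
 */
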
/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */

static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
	   enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

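/*
 * Worked example (illustrative, not part of the original file): suppose
 * slots 10..12 are being freed, io_tlb_list[13] already holds 3 free
 * slots, and slot 9 holds 1.  Step 1 above walks i = 12..10 writing
 * 4, 5, 6; step 2 continues backward from slot 9 writing 7, so every
 * affected entry again counts the contiguous free slots starting at its
 * index, stopping at an IO_TLB_SEGSIZE boundary.
 */
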
static void
swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    enum dma_data_direction dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t phys = io_tlb_orig_addr[index];

	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (dev_addr + size - 1 > dma_mask) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
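
/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * normally reach this path through dma_alloc_coherent(); calling the
 * swiotlb entry points directly looks like this.  The function name is
 * hypothetical.
 */
static void *example_alloc_desc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/* One page of zeroed memory guaranteed reachable by 'dev'. */
	void *ring = swiotlb_alloc_coherent(dev, PAGE_SIZE, ring_dma,
					    GFP_KERNEL);

	/* Pair with swiotlb_free_coherent(dev, PAGE_SIZE, ring, *ring_dma). */
	return ring;
}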

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly,
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough, return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_sync_single_* is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(dev, phys, size, dir);
	if (!map) {
		swiotlb_full(dev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
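
/*
 * Usage sketch (illustrative, not part of the original file): map one
 * page for a device read, detect the overflow buffer with
 * swiotlb_dma_mapping_error(), and unmap when the hardware is done.
 * The function name is hypothetical.
 */
static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t dma = swiotlb_map_page(dev, page, 0, PAGE_SIZE,
					  DMA_TO_DEVICE, NULL);

	if (swiotlb_dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the hardware and wait for it to finish ... */

	swiotlb_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE, NULL);
	return 0;
}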

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir, int target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
				       target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
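
/*
 * Usage sketch (illustrative, not part of the original file): a
 * long-lived streaming mapping the CPU inspects between transfers.
 * sync_for_cpu bounces the device's data back before the CPU reads it;
 * sync_for_device re-bounces the buffer before the device may touch it
 * again.  The function name is hypothetical.
 */
static void example_peek_then_resume(struct device *dev, dma_addr_t dma,
				     size_t size)
{
	swiotlb_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the buffer contents ... */
	swiotlb_sync_single_for_device(dev, dma, size, DMA_FROM_DEVICE);
}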

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
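
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * two-element scatterlist.  sg_init_table() and sg_set_buf() come from
 * <linux/scatterlist.h>, which is assumed to be available; the function
 * name is hypothetical.
 */
static int example_map_two_buffers(struct device *dev, void *a, void *b,
				   size_t len)
{
	struct scatterlist sgl[2];
	int mapped;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	mapped = swiotlb_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;	/* map_sg users must handle failure */

	/* ... program the device via sg_dma_address()/sg_dma_length() ... */

	swiotlb_unmap_sg(dev, sgl, mapped, DMA_TO_DEVICE);
	return 0;
}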

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 enum dma_data_direction dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir, int target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);