/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)
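/* DBG() is compiled out; point it at printk() when debugging the allocator. */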

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

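/*
 * Parse the "iommu=" boot option: "iommu=novmerge" disables virtual
 * merging of scatterlist entries, "iommu=vmerge" re-enables the
 * default merging behaviour.
 */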
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

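/*
 * Find a free run of npages entries in the table's allocation bitmap.
 * Returns the index of the first entry (before it_offset is applied),
 * or DMA_ERROR_CODE on failure.  Callers hold tbl->it_lock.
 */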
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffUL >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Small allocs (15 pages or less) only search below it_halfpoint */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

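	/* Constrain the search so that entry + it_offset stays below the
	 * device's DMA mask (mask is expressed in IOMMU pages here).
	 */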
	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

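/*
 * Allocate table entries covering npages starting at "page", program
 * the hardware TCEs, and return the resulting bus address, or
 * DMA_ERROR_CODE if allocation or the TCE build fails.
 */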
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 
			 unsigned int npages)
{
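	/* Caller must hold tbl->it_lock; iommu_free() below adds the locking. */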
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

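/*
 * Map a scatter/gather list.  Segments whose bus addresses come out
 * adjacent are merged into a single DMA segment unless the kernel was
 * booted with iommu=novmerge.  Returns the number of DMA segments
 * produced, or 0 on failure.
 */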
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
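		/* If the IOMMU page is smaller than the kernel page, keep
		 * page-aligned buffers page-aligned in bus space as well.
		 */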
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

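/*
 * Normally start from an empty table.  In a kdump kernel, instead
 * preserve the TCEs left by the crashed kernel, since devices may
 * still be doing DMA through them.
 */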
static void iommu_table_clear(struct iommu_table *tbl)
{
	if (!is_kdump_kernel()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

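/*
 * Undo iommu_init_table(): warn if any mappings are still live, then
 * free the allocation bitmap and the table structure itself.
 */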
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
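		/* Same page-alignment consideration as in iommu_map_sg() */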
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
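/*
 * Drivers do not normally call this directly; on IOMMU-equipped
 * platforms a dma_alloc_coherent() call typically lands here via the
 * platform's dma_map_ops.
 */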
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size,	dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}