/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	/* A right shift by 64 (align_order == 0) is undefined, so build
	 * the mask from align_order directly.
	 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Small allocs (15 pages or less) are limited to it_halfpoint,
	 * which iommu_init_table() sets at 3/4 of the table.
	 */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	iommu_area_free(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
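
/*
 * Usage sketch (illustrative, not part of this file): a dma_mapping_ops
 * wrapper would typically feed iommu_map_sg() the table hanging off the
 * device and the device's usable DMA mask, roughly:
 *
 *	int n = iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
 *			     device_to_mask(dev), direction, attrs);
 *	if (n == 0)
 *		return 0;	-- mapping failed, the list was backed out
 *
 * device_to_mask() and the archdata layout are assumptions borrowed from
 * the powerpc dma-iommu glue of this era, not guarantees made here.
 */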


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	if (!is_kdump_kernel()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
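
/*
 * Usage sketch (illustrative; the geometry values are made up): a platform
 * fills in the table geometry before handing it to iommu_init_table():
 *
 *	tbl->it_busno     = busno;
 *	tbl->it_offset    = window_base >> IOMMU_PAGE_SHIFT;
 *	tbl->it_size      = window_size >> IOMMU_PAGE_SHIFT;	-- in entries
 *	tbl->it_index     = 0;
 *	tbl->it_blocksize = 16;
 *	tbl = iommu_init_table(tbl, nid);
 */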

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
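
/*
 * Usage sketch (illustrative): mapping a single kernel buffer "buf" of
 * "len" bytes for DMA, assuming "tbl" is the device's table and "mask"
 * its usable DMA address mask:
 *
 *	dma_addr_t handle = iommu_map_page(dev, tbl, virt_to_page(buf),
 *					   offset_in_page(buf), len,
 *					   mask, DMA_TO_DEVICE, NULL);
 *	if (handle == DMA_ERROR_CODE)
 *		-- back out, e.g. return -EIO
 */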

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size,	dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
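
/*
 * Usage sketch (illustrative): the two coherent helpers above pair up as
 * follows, assuming "tbl" and "mask" as in the mapping examples:
 *
 *	dma_addr_t dma;
 *	void *ring = iommu_alloc_coherent(dev, tbl, 4096, &dma, mask,
 *					  GFP_KERNEL, dev_to_node(dev));
 *	if (ring) {
 *		-- use the buffer, then tear the mapping down:
 *		iommu_free_coherent(tbl, 4096, ring, dma);
 *	}
 */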