/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

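/* Number of IOMMU pages needed to map slen bytes starting at vaddr */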
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

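/*
 * Find and reserve a run of npages free entries in the table's
 * allocation bitmap.  Small allocations (15 pages or less) are kept in
 * the lower part of the table and large ones in the upper part, to
 * limit fragmentation; "mask" bounds the highest usable entry and
 * "align_order" the alignment of the result.  Called with tbl->it_lock
 * held.  Returns the first entry index, or DMA_ERROR_CODE on failure.
 */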
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{ 
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

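/*
 * Allocate table entries for a buffer, program the hardware TCEs and
 * return the corresponding DMA address, or DMA_ERROR_CODE on failure.
 * Takes tbl->it_lock internally.
 */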
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		       unsigned int npages, enum dma_data_direction direction,
		       unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);


	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

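/*
 * Clear the hardware TCEs backing a DMA range and release the
 * corresponding bits in the allocation bitmap.  Called with
 * tbl->it_lock held; any required TCE cache flush is left to the
 * caller.
 */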
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	
	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

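/*
 * Locking wrapper around __iommu_free() that also flushes the TCE
 * cache when the platform requires it.
 */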
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

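/*
 * Map a scatterlist for DMA.  Segments whose IOMMU allocations turn
 * out to be contiguous are merged into a single DMA segment, unless
 * virtual merging is disabled (iommu=novmerge).  Returns the number of
 * DMA segments produced, or 0 on failure.
 */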
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


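/*
 * Undo iommu_map_sg(): walk the mapped segments and release their
 * table entries, then flush the TCE cache if the platform needs it.
 */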
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
	 * GB chunk as reserved.
	 */
	if (protect4gb) {
		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

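/*
 * Tear down the iommu_table attached to a device node: warn if any
 * TCEs appear to still be in use, then free the allocation bitmap and
 * the table itself.
 */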
void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

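/* Release a mapping created by iommu_map_single() */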
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

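/* Unmap and free a buffer obtained from iommu_alloc_coherent() */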
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}