/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

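/*
 * Boot-time switch: "iommu=novmerge" disables virtual merging of
 * scatterlist entries; "iommu=vmerge" (the default) re-enables it.
 */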
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

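/*
 * Find a free run of 'npages' entries in the table bitmap, honouring the
 * device's DMA mask, segment boundary and the requested alignment.
 * Returns the index of the first entry, or DMA_ERROR_CODE if the table
 * is full.
 */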
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;

	align_mask = 0xffffffffffffffffUL >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	spin_lock_irqsave(&(tbl->it_lock), flags);

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			spin_unlock_irqrestore(&(tbl->it_lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return n;
}

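/*
 * Allocate a range of table entries for 'npages' IOMMU pages starting at
 * kernel address 'page' and program the matching TCEs.  Returns the bus
 * address, or DMA_ERROR_CODE if allocation or the HW update fails.
 */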
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

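/*
 * Sanity-check a free request: make sure the bus address and page count
 * actually fall within this table before the bitmap is touched.
 */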
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(tbl->it_lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

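/*
 * Map a scatterlist.  Entries whose bus addresses come out contiguous
 * are merged into one DMA segment unless "iommu=novmerge" is in effect
 * or the merged length would exceed the device's max segment size.
 * Returns the number of DMA segments produced, or 0 on failure.
 */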
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


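/*
 * Undo iommu_map_sg(): walk the list up to the first zero-length entry,
 * freeing the IOMMU pages behind each mapped segment.
 */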
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

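/*
 * Prepare the TCE table at init time.  On a normal boot the table is
 * simply cleared; a kdump kernel instead reserves the first kernel's
 * live mappings so in-flight DMA cannot corrupt the new kernel.
 */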
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * With firmware-assisted dump, the system goes through a clean
	 * reboot at the time of the crash, so it is safe to clear the
	 * TCE entries when firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

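/*
 * Release the allocation bitmap and the iommu_table itself, warning if
 * any TCEs still appear to be in use.
 */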
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
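/*
 * Sketch of a typical caller (e.g. a dma_map_ops ->map_page hook);
 * device_to_mask() is powerpc DMA glue, shown for illustration only:
 *
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, offset, size,
 *					device_to_mask(dev), direction,
 *					attrs);
 *	if (dma == DMA_ERROR_CODE)
 *		return DMA_ERROR_CODE;
 */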
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

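/*
 * Release a mapping made by iommu_map_page().  'size' must match the
 * size passed at map time so the page count works out the same.
 */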
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
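/*
 * Illustrative use (parameter values hypothetical):
 *
 *	void *buf = iommu_alloc_coherent(dev, tbl, 4096, &bus_addr,
 *					 dev->coherent_dma_mask,
 *					 GFP_KERNEL, dev_to_node(dev));
 */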
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

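/*
 * Counterpart to iommu_alloc_coherent(): unmap the TCEs and give the
 * pages back to the page allocator.
 */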
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}