/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

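/*
 * Boot-time control for virtual merging: "iommu=novmerge" disables
 * merging of contiguous scatterlist entries, "iommu=vmerge" re-enables
 * it (merging is the default).
 */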
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

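/*
 * "fail_iommu=" takes the standard fault-attr boot syntax,
 * <interval>,<probability>,<space>,<times>.  Failures are only
 * injected for devices whose fail_iommu sysfs attribute is also set.
 */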
static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

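/*
 * Allocate a run of npages entries from the table bitmap.  Requests of
 * more than 15 pages are served from the dedicated large pool; smaller
 * ones start in the pool selected by this CPU's precomputed hash.  On
 * failure we first rescan the current pool from its start, then walk
 * the remaining pools before returning DMA_ERROR_CODE.
 */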
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

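/*
 * Allocate bitmap entries for npages and program the hardware TCEs via
 * ppc_md.tce_build().  tce_build() only fails transiently; in that
 * case the bitmap range is released again and DMA_ERROR_CODE returned.
 */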
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

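/*
 * Validate a dma_addr/npages pair against the table bounds before a
 * free.  An out-of-range free indicates a driver bug, so the table
 * details are logged (ratelimited) and the caller skips the free.
 */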
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

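/* Map an entry index back to the pool that owns it. */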
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

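/*
 * Clear the hardware TCEs, then clear the bitmap bits under the owning
 * pool's lock.  Unlike iommu_free(), no TLB flush is done here, so
 * callers that batch frees can flush once at the end.
 */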
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

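/*
 * Map a scatterlist.  Entries that come out adjacent in DMA space are
 * virtually merged into one segment unless novmerge is set or the
 * device's max segment size would be exceeded.  Returns the number of
 * segments actually used, or 0 after unwinding on failure.
 */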
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


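/*
 * Unmap a list mapped by iommu_map_sg().  A zero dma_length, set up by
 * the mapping side, marks the end of the merged segments.
 */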
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

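/*
 * Bring the bitmap in sync with whatever a previous kernel or the
 * firmware left in the hardware table: normally all entries are freed,
 * but a kdump kernel must instead reserve the first kernel's live
 * mappings so that in-flight DMA cannot land on reused entries.
 */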
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump system goes through clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

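/*
 * Tear down a table built by iommu_init_table(): warn if any TCEs are
 * still mapped, then free the bitmap pages and the table itself.
 */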
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

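/* Inverse of iommu_map_page(): frees the IOMMU pages backing dma_handle. */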
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size,	dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

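/*
 * Inverse of iommu_alloc_coherent(): release the TCE mappings, then
 * return the backing pages to the page allocator.
 */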
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}