/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads;
 * without the hash, with 4 pools every primary thread (CPU numbers that
 * are multiples of 4) would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

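/*
 * Allocate a range of TCE table entries.
 *
 * Allocations of more than 15 pages are served from the dedicated
 * large pool; everything else comes from one of the small pools,
 * selected via the per-CPU iommu_pool_hash so that concurrent callers
 * spread across pools. On failure the current pool is rescanned from
 * its start, then the remaining pools are tried in turn before giving
 * up. @handle lets scatterlist mappings resume where the previous
 * allocation finished, @mask bounds the highest DMA page number the
 * device can address, and @align_order requests a power-of-two
 * alignment for the returned entry.
 */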
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return IOMMU_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return IOMMU_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return IOMMU_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

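/*
 * Allocate a range of table entries and program the hardware TCEs to
 * point at @page. Returns the DMA address of the mapping, or
 * IOMMU_MAPPING_ERROR if no free range was found or the hardware
 * update failed transiently.
 */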
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = IOMMU_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == IOMMU_MAPPING_ERROR))
		return IOMMU_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return IOMMU_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

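/*
 * Sanity check that a DMA address / page count pair lies within the
 * table before the corresponding entries are freed.
 */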
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

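/*
 * Free a previously allocated range: clear the hardware TCEs, then
 * clear the allocation bitmap under the lock of the pool owning the
 * entries.
 */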
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

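/*
 * Map a scatterlist. Consecutive entries whose resulting DMA addresses
 * turn out to be contiguous are merged into a single segment, unless
 * the kernel was booted with iommu=novmerge or the merged length would
 * exceed the device's segment size limit. On any failure, everything
 * mapped so far is unwound.
 */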
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = IOMMU_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = IOMMU_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a clean
	 * reboot process at the time of a system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
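	/*
	 * For example, a 65536-entry table split over 4 pools gets a
	 * poolsize of 12288: the small pools cover entries 0-49151 and
	 * the large pool the remaining 49152-65535.
	 */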

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

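/*
 * kref release callback: tear the table down once the last reference
 * from iommu_tce_table_put() has been dropped, warning if any TCEs
 * are still mapped.
 */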
static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == IOMMU_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())  {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size,	dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == IOMMU_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

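/*
 * Translate a DMA mapping direction into the read/write permission
 * bits to be set in the hardware TCE.
 */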
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
931 932
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
957 958
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
959 960 961 962 963 964

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

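/*
 * Check that @ioba (a bus address) is aligned to a 1 << page_shift
 * boundary and that, expressed in pages, it falls inside the DMA
 * window [@offset, @offset + @size).
 */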
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
			__func__, hwaddr, entry << tbl->it_page_shift,
				hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

#ifdef CONFIG_PPC_BOOK3S_64
long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);

		if (likely(pg)) {
			SetPageDirty(pg);
		} else {
			tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
			ret = -EFAULT;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
#endif

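/*
 * Called when an external user such as VFIO takes exclusive control
 * of the table: the kernel's own DMA API must stop allocating from
 * it, so the whole bitmap is marked as in use. Fails with -EBUSY if
 * any mappings already exist.
 */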
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
1065 1066 1067 1068 1069 1070
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
1071 1072
	}

1073 1074 1075
	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1076

1077
	return ret;
1078 1079 1080 1081 1082
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

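/*
 * Attach a device to the IOMMU group of the first table group linked
 * to its iommu table, after checking that the table's page size does
 * not exceed the kernel's PAGE_SIZE.
 */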
int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	/*
	 * The sysfs entries should be populated before binding the
	 * IOMMU group. If the sysfs entries aren't ready yet, we
	 * simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	tgl = list_first_entry_or_null(&tbl->it_group_list,
			struct iommu_table_group_link, next);
	if (!tgl) {
		pr_debug("%s: Skipping device %s with no group\n",
			 __func__, dev_name(dev));
		return 0;
	}
	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tgl->table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group,
	 * in which case there is nothing to detach from the
	 * associated IOMMU group.
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
#endif /* CONFIG_IOMMU_API */