/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * 
 * Rewrite, cleanup, new allocation schemes, virtual merging: 
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

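/*
 * Parse the "iommu=" kernel command line option: "iommu=novmerge"
 * disables virtual merging of scatterlist entries, "iommu=vmerge"
 * restores the default merging behaviour.
 */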
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU
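/*
 * Fault injection for IOMMU allocations. Configured at boot with
 * "fail_iommu=" using the generic fault-attr syntax (see
 * Documentation/fault-injection/), then enabled per device through the
 * "fail_iommu" sysfs attribute created by the bus notifier below.
 */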

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

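/*
 * Allocate a contiguous range of IOMMU pages from the table bitmap.
 * Small allocations come from the pool selected by the per-CPU hash,
 * large ones (more than 15 pages) from the dedicated large pool. On
 * failure we rescan the current pool from its start, then the other
 * pools, before giving up and returning DMA_ERROR_CODE.
 */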
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	/* 64 - align_order can reach 64 here, and shifting a 64-bit value
	 * by 64 is undefined; build the mask from the other end instead.
	 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

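/*
 * Allocate DMA space for one mapping and program the hardware TCEs
 * for it. Returns the bus address, or DMA_ERROR_CODE on failure.
 */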
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

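/*
 * Validate a bus address/length pair against the bounds of the table
 * before freeing; complains (ratelimited) and returns false on a bad
 * range.
 */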
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

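/*
 * Clear the hardware TCEs and the bitmap bits for a mapping. Any
 * required TLB flush is left to the caller (see iommu_free()).
 */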
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

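/*
 * Map a scatterlist for DMA. Each element gets its own IOMMU
 * allocation and, unless booted with iommu=novmerge, elements whose
 * bus addresses turn out adjacent are coalesced into a single DMA
 * segment, subject to the device's maximum segment size.
 */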
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


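/*
 * Tear down a mapping created by iommu_map_sg(): walk the list up to
 * the first zero-length entry, freeing each segment's IOMMU pages.
 */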
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

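/*
 * Initialize the table's TCE entries. In a regular boot the table is
 * simply wiped; in a kdump kernel (without firmware-assisted dump)
 * the first kernel's live mappings are reserved in the bitmap so that
 * in-flight DMA cannot corrupt the crash image.
 */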
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump system goes through clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

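/*
 * Release an iommu_table: warn about any mappings still present,
 * then free the allocation bitmap and the table structure itself.
 */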
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

#ifdef CONFIG_IOMMU_API
	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	}
#endif

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size,	dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		/* size was already page-aligned above */
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table *tbl = iommu_data;
	tbl->it_group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	tbl->it_group = grp;
	iommu_group_set_iommudata(grp, tbl, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
{
	/* ppc_md.tce_free() does not support any value but 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK)
		return -EINVAL;

	ioba >>= IOMMU_PAGE_SHIFT;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		return -EINVAL;

	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK)
		return -EINVAL;

	ioba >>= IOMMU_PAGE_SHIFT;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
		ppc_md.tce_free(tbl, entry, 1);
	else
		oldtce = 0;

	spin_unlock(&(pool->lock));

	return oldtce;
}
EXPORT_SYMBOL_GPL(iommu_clear_tce);

int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldtce;
	struct page *page;

	for ( ; pages; --pages, ++entry) {
		oldtce = iommu_clear_tce(tbl, entry);
		if (!oldtce)
			continue;

		page = pfn_to_page(oldtce >> PAGE_SHIFT);
		WARN_ON(!page);
		if (page) {
			if (oldtce & TCE_PCI_WRITE)
				SetPageDirty(page);
			put_page(page);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);

/*
 * hwaddr is a kernel virtual address here (0xc... bazillion),
 * tce_build converts it to a physical address.
 */
int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
		unsigned long hwaddr, enum dma_data_direction direction)
{
	int ret = -EBUSY;
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	/* Add new entry if it is not busy */
	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);

	spin_unlock(&(pool->lock));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
				hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_build);

int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
		unsigned long tce)
{
	int ret;
	struct page *page = NULL;
	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page);
	if (unlikely(ret != 1)) {
		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
				tce, entry << IOMMU_PAGE_SHIFT, ret); */
		return -EFAULT;
	}
	hwaddr = (unsigned long) page_address(page) + offset;

	ret = iommu_tce_build(tbl, entry, hwaddr, direction);
	if (ret)
		put_page(page);

	if (ret < 0)
		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
				__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);

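/*
 * Hand the whole table over to an external user (e.g. VFIO via the
 * SPAPR TCE interface): fail if the kernel still holds mappings,
 * otherwise mark every entry allocated so the in-kernel DMA API
 * cannot touch the window until ownership is released.
 */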
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		return -EBUSY;
	}

	memset(tbl->it_map, 0xff, sz);
	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

static int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	int ret = 0;

	if (WARN_ON(dev->iommu_group)) {
		pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
				dev_name(dev),
				iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl || !tbl->it_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
				dev_name(dev));
		return 0;
	}

	pr_debug("iommu_tce: adding %s to iommu group %d\n",
			dev_name(dev), iommu_group_id(tbl->it_group));

	ret = iommu_group_add_device(tbl->it_group, dev);
	if (ret < 0)
		pr_err("iommu_tce: %s has not been added, ret=%d\n",
				dev_name(dev), ret);

	return ret;
}

static void iommu_del_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static int __init tce_iommu_init(void)
{
	struct pci_dev *pdev = NULL;

	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);

	for_each_pci_dev(pdev)
		iommu_add_device(&pdev->dev);

	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}

subsys_initcall_sync(tce_iommu_init);

#else

void iommu_register_group(struct iommu_table *tbl,
		int pci_domain_number, unsigned long pe_num)
{
}

#endif /* CONFIG_IOMMU_API */