/*
 * omap iommu: tlb and pagetable primitives
 *
4
 * Copyright (C) 2008-2010 Nokia Corporation
5 6 7 8 9 10 11 12 13 14 15
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
16
#include <linux/slab.h>
17 18 19 20
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
21 22 23
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
24 25 26

#include <asm/cacheflush.h>

27
#include <plat/iommu.h>
28

29
#include <plat/iopgtable.h>
30

/*
 * Iterate over the first @n TLB slots of @obj, reading each cam/ram
 * pair into @cr.  Indexing goes through the victim register, so the
 * walk clobbers the TLB lock state (see __iotlb_read_cr()).
 */
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

/* forward declaration; used by attach/foreach before the definition below */
static struct platform_driver omap_iommu_driver;
/* slab cache for second-level (L2) page tables */
static struct kmem_cache *iopte_cachep;

/**
56
 * omap_install_iommu_arch - Install archtecure specific iommu functions
57 58 59 60 61
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kind of iommu algorithm(tlb, pagetable) among
 * omap series. This interface installs such an iommu algorighm.
 **/
62
int omap_install_iommu_arch(const struct iommu_functions *ops)
63 64 65 66 67 68 69
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
70
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
71 72

/**
 * omap_uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	/* arch hook stashes the MMU register file (into obj->ctx) */
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	/* arch hook reloads the register file saved by omap_iommu_save_ctx() */
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

/*
 * Power up the MMU through the arch-specific enable hook.  The
 * functional clock is held only for the duration of the call.
 */
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	/* no arch callbacks installed yet -> cannot program the hardware */
	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

/* power down the MMU via the arch hook; clock held only for the call */
static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
/* convert a raw cam/ram register pair into a generic iotlb_entry (arch hook) */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

/* check whether a cam/ram pair holds a valid entry (arch hook) */
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

/* build a cam/ram pair from @e; kmalloc'd by the arch hook, caller kfree()s */
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

/* extract the mapped device-virtual address from a cam/ram pair */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

/* page-table attribute bits for a tlb entry (arch hook) */
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

/* read/clear MMU fault status; *da receives the faulting address */
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

/* read the TLB lock register into @l (preserved-base / victim indices) */
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

}

/* write @l back to the TLB lock register */
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

/* read the cam/ram pair at the current victim slot (arch hook) */
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

/* load @cr into the TLB: flush the victim slot, then latch the new entry */
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	/* point the victim register at slot @n, then read its cam/ram pair */
	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	/* all slots below 'base' are preserved; full means no room at all */
	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		/* scan for a free (invalid) slot to use as victim */
		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		/* re-read: the scan above left the victim at the free slot */
		iotlb_lock_get(obj, &l);
	} else {
		/* preserved entries are loaded at the current base slot */
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

#else /* !PREFETCH_IOTLB */

/* without prefetch, entries are loaded lazily by the fault handler */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

/* warm the TLB for a mapping just written to the page table */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
339
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
340 341
{
	int i;
342
	struct cr_regs cr;
343 344 345

	clk_enable(obj->clk);

346
	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
347 348 349 350 351 352 353 354 355 356 357 358
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
359
			iotlb_load_cr(obj, &cr);
360 361 362 363 364 365 366 367 368 369 370 371 372
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	/* reset lock state too: no preserved entries survive a global flush */
	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
387

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

/* dump the arch-specific MMU register context into @buf (debug support) */
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

/*
 * Copy all currently-valid TLB entries into @crs (at most @num).
 * The TLB lock state is saved and restored around the walk.
 * Returns the number of entries copied.
 */
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return  p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	/* cap the snapshot at whatever fits in @bytes */
	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
454

/* call @fn for every device bound to this driver (debug iteration helper) */
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
/* clean D-cache lines covering pgd entries [first, last]; @last inclusive */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

/* clean D-cache lines covering pte entries [first, last]; @last inclusive */
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

/* return an L2 page table to the slab cache */
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

/*
 * Return the L2 pte slot for @da, allocating the L2 table if the pgd
 * slot is empty.  Called with obj->page_table_lock held; the lock is
 * dropped around the allocation, so a concurrent populate is re-checked.
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

/* install a 1MB section mapping directly in the L1 table */
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

/* install a 16MB supersection: the same descriptor repeated in 16 L1 slots */
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

/* install a 4KB small-page mapping in the L2 table (allocated on demand) */
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

580
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
581 582 583 584 585
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

586 587 588 589 590 591
	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

592 593 594 595 596 597 598 599 600
	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

/*
 * Write @e into the page table, dispatching on page size.
 * Takes obj->page_table_lock around the actual table update.
 */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	/* evict any stale TLB entry for this address before rewriting the PT */
	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	/* *ppte stays NULL for section/supersection (no L2 table) mappings */
	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

679
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
680 681 682 683 684 685 686 687
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

688
	if (iopgd_is_table(*iopgd)) {
689 690 691 692 693 694 695
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
696
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
714
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
715 716
			nent *= 16;
			/* rewind to the 1st entry */
717
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
718 719 720 721 722 723 724 725 726 727 728 729 730 731
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	/* drop any cached TLB entry for the now-unmapped address */
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

/* tear down the entire page table: free all L2 tables, zero the L1, flush TLB */
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
779
	u32 da, errs;
780
	u32 *iopgd, *iopte;
781
	struct omap_iommu *obj = data;
782
	struct iommu_domain *domain = obj->domain;
783 784 785 786 787

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
788
	errs = iommu_report_fault(obj, &da);
789
	clk_disable(obj->clk);
790 791
	if (errs == 0)
		return IRQ_HANDLED;
792 793

	/* Fault callback or TLB/PTE Dynamic loading */
794
	if (!report_iommu_fault(domain, obj->dev, da, 0))
795 796
		return IRQ_HANDLED;

797 798
	iommu_disable(obj);

799 800
	iopgd = iopgd_offset(obj, da);

801
	if (!iopgd_is_table(*iopgd)) {
802 803
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
804 805 806 807 808
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

809 810 811
	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);
812 813 814 815 816 817

	return IRQ_NONE;
}

/* driver_find_device() match callback: compare obj->name against @data */
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 *
 * Returns the omap_iommu on success, NULL when no device matches
 * @name, or an ERR_PTR on failure.
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				(void *)name,
				device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	/* only disable if we were the one who enabled it above */
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	/* last user gone: power the MMU down */
	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	/* expect exactly one MEM and one IRQ resource */
	if (pdev->num_resources != 2)
		return -EINVAL;

	/* the register save area (obj->ctx) lives right behind obj */
	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	/* free all L2 tables back to iopte_cachep before teardown */
	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

/* platform driver glue; registered from omap_iommu_init() */
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

/* slab ctor: clean the D-cache over a fresh L2 table before the MMU sees it */
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

1013 1014 1015 1016
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			 phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
1017
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

1037
	ret = omap_iopgtable_store_entry(oiommu, &e);
1038
	if (ret)
1039
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
1040

1041
	return ret;
1042 1043 1044 1045 1046 1047
}

/* iommu_ops.unmap callback: returns the order actually unmapped */
static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t unmap_size;

	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);

	unmap_size = iopgtable_clear_entry(oiommu, da);

	/* 0 bytes cleared means nothing was mapped at @da */
	return unmap_size ? get_order(unmap_size) : -EINVAL;
}

/* iommu_ops.attach_dev: bind the device's omap iommu to this domain */
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

/* iommu_ops.detach_dev: tear down all mappings and release the iommu */
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

/* iommu_ops.domain_init: allocate the domain and its L1 page table */
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	/* the MMU walker reads the table uncached; start from clean lines */
	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

/* iommu_ops.iova_to_phys: walk the page table and translate @da */
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		/* L2 entry: small (4K) or large (64K) page */
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		/* L1 entry: section (1M) or supersection (16M) */
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

/* iommu_ops.domain_has_cap: no optional capabilities are advertised */
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

/* generic IOMMU API callbacks, registered via bus_set_iommu() */
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	/* hook our ops into the generic IOMMU API for platform devices */
	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	/*
	 * Unregister the driver first: omap_iommu_remove() still frees
	 * L2 tables back into iopte_cachep (via iopgtable_clear_entry_all),
	 * so the cache must outlive the driver.
	 */
	platform_driver_unregister(&omap_iommu_driver);

	kmem_cache_destroy(iopte_cachep);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");