omap-iommu.c 27.8 KB
Newer Older
1 2 3
/*
 * omap iommu: tlb and pagetable primitives
 *
4
 * Copyright (C) 2008-2010 Nokia Corporation
5 6 7 8 9 10 11 12 13 14 15
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
16
#include <linux/slab.h>
17 18 19
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
20
#include <linux/iommu.h>
21
#include <linux/omap-iommu.h>
22 23
#include <linux/mutex.h>
#include <linux/spinlock.h>
24
#include <linux/io.h>
25
#include <linux/pm_runtime.h>
26 27 28

#include <asm/cacheflush.h>

29
#include <linux/platform_data/iommu-omap.h>
30

31
#include "omap-iopgtable.h"
32
#include "omap-iommu.h"
33

34 35 36 37 38
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

39 40 41
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

42 43 44 45 46
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};

57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

struct iotlb_lock {
	short base;
	short vict;
};

72 73 74 75 76 77 78
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
79
 * omap_install_iommu_arch - Install archtecure specific iommu functions
80 81 82 83 84
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kind of iommu algorithm(tlb, pagetable) among
 * omap series. This interface installs such an iommu algorighm.
 **/
85
int omap_install_iommu_arch(const struct iommu_functions *ops)
86 87 88 89 90 91 92
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
93
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
94 95

/**
96
 * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
97 98 99 100
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorighm installed previously.
 **/
101
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
102 103 104 105 106 107
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
108
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
109 110

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 *
 * Delegates to the installed arch callbacks to snapshot the MMU
 * register context before the domain loses power.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
121 122

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 *
 * Counterpart of omap_iommu_save_ctx(): reprograms the MMU registers
 * from the previously saved context after power is restored.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
133 134

/**
 * omap_iommu_arch_version - Return running iommu arch version
 *
 * NOTE(review): dereferences arch_iommu unconditionally; callers must
 * only use this after an arch has been installed.
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
142

143
static int iommu_enable(struct omap_iommu *obj)
144 145
{
	int err;
146 147
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;
148

149 150 151
	if (!arch_iommu)
		return -ENODEV;

152
	if (pdata && pdata->deassert_reset) {
153 154 155 156 157 158 159
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

160
	pm_runtime_get_sync(obj->dev);
161 162 163 164 165 166

	err = arch_iommu->enable(obj);

	return err;
}

167
static void iommu_disable(struct omap_iommu *obj)
168
{
169 170 171
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

172 173
	arch_iommu->disable(obj);

174
	pm_runtime_put_sync(obj->dev);
175

176
	if (pdata && pdata->assert_reset)
177
		pdata->assert_reset(pdev, pdata->reset_name);
178 179 180 181 182
}

/*
 *	TLB operations
 */
183
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
184 185 186 187 188
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
189
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
190 191 192 193 194 195 196 197 198

/* Returns >0 if @cr holds a valid TLB entry, 0 if not, -EINVAL on NULL. */
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	return cr ? arch_iommu->cr_valid(cr) : -EINVAL;
}

199
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
200 201 202 203 204 205 206 207
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

208
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
209 210 211 212 213 214 215 216 217
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

218
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
219 220 221 222
{
	return arch_iommu->fault_isr(obj, da);
}

223
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
224 225 226 227 228 229 230 231 232 233
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

}

234
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
235 236 237 238 239 240 241 242 243
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

244
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
245 246 247 248
{
	arch_iommu->tlb_read_cr(obj, cr);
}

249
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
250 251 252 253 254 255 256 257 258 259 260 261 262
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
263
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
264 265 266 267 268 269 270
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

271
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	/* Point the victim register at slot @n, then latch its cam/ram. */
	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

285 286 287 288 289
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 *
 * Loads @e into the hardware TLB. Preserved (prsvd) entries go at the
 * locked base; regular entries go into the first invalid slot, if any.
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	/* base == nr_tlb_entries means every slot is locked/preserved */
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		/* find the first invalid slot to reuse */
		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		/* re-read: the iteration above moved the victim pointer */
		iotlb_lock_get(obj, &l);
	} else {
		/* preserved entries are loaded at the current base slot */
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	/* a freshly loaded preserved entry extends the locked region */
	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

/* Without prefetch, entries are faulted in on demand; loading is a no-op. */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

357
/* Thin alias: prefetching an entry is just loading it (may be a no-op). */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
361 362 363 364 365 366 367 368

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
369
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
370 371
{
	int i;
372
	struct cr_regs cr;
373

374
	pm_runtime_get_sync(obj->dev);
375

376
	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
377 378 379 380 381 382 383 384 385 386 387 388
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
389
			iotlb_load_cr(obj, &cr);
390 391 392
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
393
	pm_runtime_put_sync(obj->dev);
394 395 396 397 398 399 400 401 402

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
403
static void flush_iotlb_all(struct omap_iommu *obj)
404 405 406
{
	struct iotlb_lock l;

407
	pm_runtime_get_sync(obj->dev);
408 409 410 411 412 413 414

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

415
	pm_runtime_put_sync(obj->dev);
416
}
417

418
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
419

420
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
421 422 423 424
{
	if (!obj || !buf)
		return -EINVAL;

425
	pm_runtime_get_sync(obj->dev);
426

427
	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
428

429
	pm_runtime_put_sync(obj->dev);
430 431 432

	return bytes;
}
433
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
434

435 436
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
437 438
{
	int i;
439 440
	struct iotlb_lock saved;
	struct cr_regs tmp;
441 442
	struct cr_regs *p = crs;

443
	pm_runtime_get_sync(obj->dev);
444 445
	iotlb_lock_get(obj, &saved);

446
	for_each_iotlb_cr(obj, num, i, tmp) {
447 448 449 450
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}
451

452
	iotlb_lock_set(obj, &saved);
453
	pm_runtime_put_sync(obj->dev);
454 455 456 457 458

	return  p - crs;
}

/**
459
 * omap_dump_tlb_entries - dump cr arrays to given buffer
460 461 462
 * @obj:	target iommu
 * @buf:	output buffer
 **/
463
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
464
{
465
	int i, num;
466 467 468
	struct cr_regs *cr;
	char *p = buf;

469 470 471 472
	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
473 474 475
	if (!cr)
		return 0;

476 477
	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
478 479 480 481 482
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
483
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
484

485
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
486 487 488 489
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
490
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

523
/*
 * iopte_alloc - return the L2 (pte) table entry for @da, allocating the
 * L2 table first if the pgd slot is still empty.  Called with
 * obj->page_table_lock held; the lock is dropped around the allocation.
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	/* re-check: another thread may have installed a table meanwhile */
	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the reduniovant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

561
/* Install a 1MB section mapping da -> pa directly in the L1 table. */
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	/* both addresses must sit on a section boundary */
	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);

	return 0;
}

576
/* Install a 16MB supersection: 16 identical consecutive L1 entries. */
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 val;
	int i;

	/* both addresses must sit on a supersection boundary */
	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	val = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	for (i = 0; i < 16; i++)
		iopgd[i] = val;
	flush_iopgd_range(iopgd, iopgd + 15);

	return 0;
}

593
/* Install a 4KB small-page mapping in the (possibly new) L2 table. */
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

610
/* Install a 64KB large-page mapping: 16 identical consecutive L2 entries. */
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte;
	int i;

	/*
	 * Validate alignment BEFORE iopte_alloc(): the previous ordering
	 * could allocate and install a fresh L2 table for a request that
	 * is then rejected as misaligned.
	 */
	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	iopte = iopte_alloc(obj, iopgd, da);
	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

631 632
/*
 * iopgtable_store_entry_core - write the page table entry/entries for @e,
 * dispatching on the page size to the matching alloc helper.  Takes
 * obj->page_table_lock around the actual table update.
 */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		/* entry was built by iotlb_init_entry; anything else is a bug */
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 *
 * Flushes any stale TLB entry for e->da first, then writes the page
 * table and (when PREFETCH_IOTLB is enabled) preloads the TLB.
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
685 686 687 688 689 690 691 692

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
693 694
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
695 696 697 698 699 700 701
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

702
	if (iopgd_is_table(*iopgd))
703 704 705 706 707 708
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

709
/*
 * iopgtable_clear_entry_core - tear down the page table entry/entries
 * covering @da and return the number of bytes unmapped (0 if nothing
 * was mapped).  Frees the L2 table when its last entry goes away.
 * Caller holds obj->page_table_lock.
 */
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/*
		 * Pointer arithmetic already scales by sizeof(*iopte);
		 * multiplying by the entry size again (as before) flushed
		 * 4x past the cleared range.
		 */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	/* same scaling fix as for the pte flush above */
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
762
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
763 764 765 766 767 768 769 770 771 772 773 774 775
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

776
/*
 * iopgtable_clear_entry_all - wipe the whole page table: free every L2
 * table, zero every L1 entry and flush the TLB.
 */
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		/* table entries own an L2 table that must be freed first */
		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
809
	u32 da, errs;
810
	u32 *iopgd, *iopte;
811
	struct omap_iommu *obj = data;
812
	struct iommu_domain *domain = obj->domain;
813 814 815 816

	if (!obj->refcount)
		return IRQ_NONE;

817
	errs = iommu_report_fault(obj, &da);
818 819
	if (errs == 0)
		return IRQ_HANDLED;
820 821

	/* Fault callback or TLB/PTE Dynamic loading */
822
	if (!report_iommu_fault(domain, obj->dev, da, 0))
823 824
		return IRQ_HANDLED;

825 826
	iommu_disable(obj);

827 828
	iopgd = iopgd_offset(obj, da);

829
	if (!iopgd_is_table(*iopgd)) {
830 831
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
				obj->name, errs, da, iopgd, *iopgd);
832 833 834 835 836
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

837 838
	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
839 840 841 842 843 844

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
845
	struct omap_iommu *obj = to_iommu(dev);
846 847 848 849 850 851 852 853
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 *
 * Returns the attached iommu on success, or an ERR_PTR on failure
 * (-ENODEV if no device matches @name, -EBUSY if already attached).
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				(void *)name,
				device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	/* start from a clean TLB; the new table has no cached entries */
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner)) {
		err = -ENODEV;
		goto err_module;
	}

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	/* refcount is still 1 here (decremented below), so disable */
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 *
 * Drops the attach refcount, disabling the MMU when it reaches zero,
 * and forgets the page table pointer.  Safe to call with NULL/ERR_PTR.
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

929 930 931
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	/* obj and its register-save context area in one allocation */
	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* NOTE(review): pdata is dereferenced unchecked — boards using this
	 * driver are expected to always supply platform data */
	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}

978
static int omap_iommu_remove(struct platform_device *pdev)
979
{
980
	struct omap_iommu *obj = platform_get_drvdata(pdev);
981 982 983

	iopgtable_clear_entry_all(obj);

984 985
	pm_runtime_disable(obj->dev);

986 987 988 989 990 991
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
992
	.remove	= omap_iommu_remove,
993 994 995 996 997 998 999 1000 1001 1002
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
/*
 * Fill @e from a (da, pa, flags) triple and return the page size in
 * bytes that the entry covers.
 */
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
				   u32 flags)
{
	*e = (struct iotlb_entry){
		.da	= da,
		.pa	= pa,
		.valid	= 1,
		/* FIXME: add OMAP1 support */
		.pgsz	= flags & MMU_CAM_PGSZ_MASK,
		.endian	= flags & MMU_RAM_ENDIAN_MASK,
		.elsz	= flags & MMU_RAM_ELSZ_MASK,
		.mixed	= flags & MMU_RAM_MIXED_MASK,
	};

	return iopgsz_to_bytes(e->pgsz);
}

1020
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1021
			 phys_addr_t pa, size_t bytes, int prot)
1022 1023
{
	struct omap_iommu_domain *omap_domain = domain->priv;
1024
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

1043
	ret = omap_iopgtable_store_entry(oiommu, &e);
1044
	if (ret)
1045
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
1046

1047
	return ret;
1048 1049
}

1050 1051
/*
 * omap_iommu_unmap - iommu_ops.unmap callback.  Returns the number of
 * bytes actually unmapped at @da (may differ from @size; the core
 * retries with the remainder).
 */
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	/* %zu: size is a size_t, not unsigned int */
	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

/*
 * omap_iommu_attach_dev - iommu_ops.attach_dev callback: bind the omap
 * iommu named in the client's archdata to this domain's page table.
 */
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

1096 1097
/*
 * _omap_iommu_detach_dev - detach helper; caller holds omap_domain->lock.
 * Clears all mappings and releases the iommu attached to this domain.
 */
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
			struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}
1115

1116 1117 1118 1119 1120 1121 1122
/* iommu_ops.detach_dev callback: locked wrapper around the detach helper. */
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);

	_omap_iommu_detach_dev(omap_domain, dev);

	spin_unlock(&omap_domain->lock);
}

/*
 * omap_iommu_domain_init - iommu_ops.domain_init callback: allocate the
 * per-domain bookkeeping plus an aligned, cache-clean L1 page table.
 */
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	/* the MMU walks this table uncached; make sure it is clean in RAM */
	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

1171 1172 1173 1174 1175 1176 1177
	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

1178 1179 1180 1181 1182
	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

/*
 * omap_iommu_iova_to_phys - iommu_ops.iova_to_phys callback: walk the
 * page table and translate @da.  Returns 0 for an unmapped or bogus
 * address.
 */
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		/* second-level entry: small (4K) or large (64K) page */
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
							(unsigned long long)da);
	} else {
		/* first-level entry: section (1M) or supersection (16M) */
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
							(unsigned long long)da);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
1229
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
1230 1231
};

1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignement */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

1244
	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1245

1246 1247
	return platform_driver_register(&omap_iommu_driver);
}
1248 1249
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");