ipmmu-vmsa.c 28.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10
/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

11
#include <linux/bitmap.h>
12
#include <linux/delay.h>
13
#include <linux/dma-iommu.h>
14 15 16 17 18 19 20
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
21
#include <linux/of.h>
22
#include <linux/of_device.h>
23
#include <linux/of_iommu.h>
24
#include <linux/of_platform.h>
25 26 27
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
28
#include <linux/sys_soc.h>
29

30
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
31 32
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
R
Robin Murphy 已提交
33 34 35 36 37
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
38
#endif
39

40 41
#include "io-pgtable.h"

42
#define IPMMU_CTX_MAX 8
43

44 45
/*
 * Per-SoC feature description selected through the OF match table.
 */
struct ipmmu_features {
	bool use_ns_alias_offset;	/* offset base to the non-secure register bank */
	bool has_cache_leaf_nodes;	/* SoC has separate root/leaf IPMMU instances */
	unsigned int number_of_contexts; /* number of translation contexts (<= IPMMU_CTX_MAX) */
	bool setup_imbuscr;		/* IMBUSCR must be programmed at context init */
	bool twobit_imttbcr_sl0;	/* IMTTBCR.SL0 is a two-bit field at a different position */
};

52 53 54
/*
 * Per-instance driver state for one IPMMU.
 */
struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;		/* register bank (may be offset to the NS alias) */
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;	/* root instance; == this for root devices */
	const struct ipmmu_features *features;
	unsigned int num_utlbs;
	unsigned int num_ctx;		/* usable contexts, capped at IPMMU_CTX_MAX */
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct iommu_group *group;	/* shared group, lazily allocated */
	struct dma_iommu_mapping *mapping; /* ARM (32-bit) DMA mapping, if used */
};

/*
 * One IOMMU domain, bound to a single IPMMU instance and hardware context.
 */
struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;	/* NULL until first attach */
	struct iommu_domain io_domain;	/* embedded generic domain (see to_vmsa_domain) */

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;	/* ARM 32-bit LPAE stage-1 page table ops */

	unsigned int context_id;	/* index into mmu->root->domains[] */
	spinlock_t lock;			/* Protects mappings */
};

79 80 81 82 83
/* Convert a generic iommu_domain back to the enclosing IPMMU domain. */
static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

84
/*
 * Return the IPMMU instance a device was bound to by of_xlate(), or NULL if
 * the device has no fwspec (i.e. was never verified through of_xlate()).
 */
static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

89 90 91 92 93 94
#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

95 96
#define IM_NS_ALIAS_OFFSET		0x800

97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148
#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
/* Fix: shift was defined as the letter "O" instead of numeric 0. */
#define IMTTBCR_TSZ0_SHIFT		0

M
Magnus Damm 已提交
149 150 151 152
#define IMTTBCR_SL0_TWOBIT_LVL_3	(0 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_2	(1 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)

153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212
#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

/* A root IPMMU points to itself; leaf instances point at their root. */
static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

/*
 * driver_for_each_device() callback: record the root IPMMU instance, if any,
 * in *data. Always returns 0 so the iteration visits every bound device.
 */
static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

/*
 * Find the already-probed root IPMMU instance, or NULL if none has been
 * registered yet (callers defer probing in that case).
 */
static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

243 244 245 246 247 248 249 250 251 252 253 254 255 256 257
/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

/* Read a 32-bit register at @offset from the instance's register bank. */
static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

/* Write a 32-bit register at @offset in the instance's register bank. */
static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

M
Magnus Damm 已提交
258 259
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
260
{
261 262
	return ipmmu_read(domain->mmu->root,
			  domain->context_id * IM_CTX_SIZE + reg);
263 264
}

M
Magnus Damm 已提交
265 266
/*
 * Write a context register of the root instance only. Use this for
 * registers that exist solely on the root IPMMU.
 */
static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	unsigned int offset = domain->context_id * IM_CTX_SIZE + reg;

	ipmmu_write(domain->mmu->root, offset, data);
}

M
Magnus Damm 已提交
272 273 274 275 276 277 278 279 280 281 282
/*
 * Write a context register on both the domain's own (leaf) instance, when
 * it differs from the root, and on the root instance itself.
 */
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	unsigned int offset = domain->context_id * IM_CTX_SIZE + reg;
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/* Leaf (cache) instance first, then the root. */
	if (mmu != mmu->root)
		ipmmu_write(mmu, offset, data);

	ipmmu_write(mmu->root, offset, data);
}

283 284 285 286 287 288 289 290 291
/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	/* IMCTR.FLUSH self-clears when the hardware flush completes. */
	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			/* ~100us elapsed (1us per iteration); give up. */
			dev_err_ratelimited(domain->mmu->dev,
			"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

/* Kick a full TLB flush for the context and wait for it to finish. */
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	/* The flush must be mirrored to the leaf instance as well. */
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	/* Point the microTLB at this domain's context and enable translation. */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/* Clearing IMUCTR disables translation for this microTLB entirely. */
	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

346
/* io-pgtable callback: flush the whole TLB for the cookie's domain. */
static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

353 354
/* io-pgtable callback: intentionally empty, see comment below. */
static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

359
/* TLB maintenance callbacks handed to the io-pgtable layer. */
static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	/* sync degenerates to a full flush since selective flush is absent */
	.tlb_sync = ipmmu_tlb_flush_all,
};

365 366 367 368
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

369 370 371 372 373 374 375 376
/*
 * Reserve a free hardware context on @mmu for @domain.
 * Returns the context index, or -EBUSY if all contexts are in use.
 */
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

389 390 391 392 393 394 395 396 397 398 399 400 401
/* Release a hardware context previously reserved by allocate_context(). */
static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

402 403
/*
 * Allocate a hardware context and page tables for @domain and program the
 * context registers (TTBR, TTBCR, MAIR, IMBUSCR, IMSTR, IMCTR).
 * Returns 0 on success or a negative error code.
 */
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		/* Roll back the context reservation on failure. */
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

/* Tear down the hardware context backing @domain and release its slot. */
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/*
 * Per-context fault handler: decode IMSTR, log fatal errors, and forward
 * page/translation faults to report_iommu_fault().
 */
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

/* Top-level IRQ handler: dispatch to every active context's handler. */
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

590
/*
 * Allocate and minimally initialize a domain; hardware setup is deferred
 * until the first attach. Returns the embedded iommu_domain or NULL on OOM.
 */
static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623
/*
 * iommu_ops.domain_alloc: support unmanaged and DMA domain types; any other
 * type yields NULL. DMA domains additionally get an iova cookie.
 */
static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/*
			 * Fix: io_domain points at the io_domain member inside
			 * the kzalloc'd ipmmu_vmsa_domain, not at the start of
			 * the allocation; kfree() must receive the pointer
			 * returned by kzalloc(), i.e. the container.
			 */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

624
static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

/*
 * Attach @dev to @io_domain: initialize the hardware context on first use,
 * reject cross-IPMMU attachment, then enable the device's microTLBs.
 */
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	/* Route every microTLB listed in the fwspec to this context. */
	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

/* Disable every microTLB of @dev; the context itself is left running. */
static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

/* Map @size bytes at @iova to @paddr by delegating to the io-pgtable ops. */
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

/* Unmap via the io-pgtable ops; returns the number of bytes unmapped. */
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

722 723 724 725 726 727 728 729
/* Flush the whole TLB; skipped while the domain has no hardware context. */
static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

730 731 732
/* Translate @iova through the software page tables (no hardware access). */
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

740 741
/*
 * Bind @dev to the IPMMU instance named by the OF phandle @args by stashing
 * the instance's driver data in the device's fwspec.
 */
static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
	return 0;
}

753 754 755 756 757 758 759 760 761 762 763
/* Opt-in hook for R-Car Gen3 slave devices; currently nothing is allowed. */
static bool ipmmu_slave_whitelist(struct device *dev)
{
	/* By default, do not allow use of IPMMU */
	return false;
}

/* SoC match table used to gate the Gen3 whitelist check in of_xlate(). */
static const struct soc_device_attribute soc_r8a7795[] = {
	{ .soc_id = "r8a7795", },
	{ /* sentinel */ }
};

764 765 766
/*
 * iommu_ops.of_xlate: record one microTLB id per invocation and bind the
 * device to its IPMMU instance on the first call.
 */
static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	/* For R-Car Gen3 use a white list to opt-in slave devices */
	if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

R
Robin Murphy 已提交
780
/*
 * 32-bit ARM path (no IOMMU_DMA): put the device in its own group and
 * attach it to a shared ARM DMA mapping, creating the mapping on first use.
 */
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

R
Robin Murphy 已提交
841
/*
 * iommu_ops.add_device: accept only devices verified in of_xlate(), then
 * either set up the legacy ARM mapping or join the common IOMMU group path.
 */
static int ipmmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!to_ipmmu(dev))
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		return ipmmu_init_arm_mapping(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

R
Robin Murphy 已提交
862
/* Undo add_device: detach the ARM mapping (no-op on arm64) and leave the group. */
static void ipmmu_remove_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

868
/*
 * iommu_ops.device_group: all devices behind one IPMMU share a single
 * group, allocated lazily and cached in the instance.
 */
static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

/* IOMMU core callbacks exported by this driver. */
static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

901 902 903 904 905 906 907 908 909
/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

/* Bring the hardware to a known state by disabling every context. */
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

914 915
/* Feature set for the generic "renesas,ipmmu-vmsa" (R-Car Gen2) binding. */
static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
};

922 923 924 925 926 927 928 929
/* Feature set for the R-Car Gen3 r8a7795 (H3), with root/leaf topology. */
static const struct ipmmu_features ipmmu_features_r8a7795 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
};

930 931 932 933
/* OF match table; .data selects the per-SoC feature description. */
static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_r8a7795,
	}, {
		/* Terminator */
	},
};

MODULE_DEVICE_TABLE(of, ipmmu_of_ids);

944 945 946 947 948 949 950 951 952 953 954 955 956 957 958
/*
 * Platform probe: map registers, determine root/leaf role, request the IRQ
 * for root instances, and register with the IOMMU core where appropriate.
 */
static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
			     mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

/* Platform remove: unregister from the IOMMU core and quiesce the hardware. */
static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

/* Platform driver glue. */
static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

/*
 * Subsystem init: register the platform driver once, and on 32-bit ARM
 * (without IOMMU_DMA) install the ops on the platform bus.
 */
static int __init ipmmu_init(void)
{
	static bool setup_done;	/* guards against a second invocation */
	int ret;

	if (setup_done)
		return 0;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}

/* Module exit: unregister the platform driver. */
static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

R
Robin Murphy 已提交
1111 1112
IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");
1113

1114 1115 1116
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");