/*
 * omap iommu: omap2/3 architecture specific functions
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <plat/iommu.h>

/*
 * omap2 architecture specific register bit definitions
 */
#define IOMMU_ARCH_VERSION	0x00000011

/* SYSCONF */
#define MMU_SYS_IDLE_SHIFT	3
#define MMU_SYS_IDLE_FORCE	(0 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_NONE	(1 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_SMART	(2 << MMU_SYS_IDLE_SHIFT)
#define MMU_SYS_IDLE_MASK	(3 << MMU_SYS_IDLE_SHIFT)

#define MMU_SYS_SOFTRESET	(1 << 1)
#define MMU_SYS_AUTOIDLE	1

/* SYSSTATUS */
#define MMU_SYS_RESETDONE	1

/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT	(1 << 4)
#define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
#define MMU_IRQ_EMUMISS		(1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT	(1 << 1)
#define MMU_IRQ_TLBMISS		(1 << 0)

#define __MMU_IRQ_FAULT		\
	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK		\
	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT		1
#define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB	(1 << 3)
#define MMU_CNTL_TWL_EN		(1 << 2)
#define MMU_CNTL_MMU_EN		(1 << 1)

#define get_cam_va_mask(pgsz)				\
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)


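/*
 * __iommu_set_twl - set MMU_CNTL and the IRQ mask for table walk on/off
 * @obj:	target iommu
 * @on:		enable the hardware table walk logic
 *
 * With @on, table walk related faults are enabled in MMU_IRQENABLE and
 * TWL_EN is set in MMU_CNTL; otherwise only TLB miss interrupts are kept.
 * The MMU itself is left enabled in both cases.
 */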
static void __iommu_set_twl(struct iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

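/*
 * omap2_iommu_enable - reset and enable the MMU
 * @obj:	target iommu
 *
 * The page directory (obj->iopgd) must be 16KB aligned, both virtually
 * and physically.  The MMU is soft reset (polled for up to 20ms),
 * configured for smart-idle with autoidle, given the new translation
 * table base and finally enabled with hardware table walk.
 */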
static int omap2_iommu_enable(struct iommu *obj)
{
	u32 l, pa;
	unsigned long timeout;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);

	timeout = jiffies + msecs_to_jiffies(20);
	do {
		l = iommu_read_reg(obj, MMU_SYSSTATUS);
		if (l & MMU_SYS_RESETDONE)
			break;
	} while (!time_after(jiffies, timeout));

	if (!(l & MMU_SYS_RESETDONE)) {
		dev_err(obj->dev, "can't take mmu out of reset\n");
		return -ENODEV;
	}

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	l = iommu_read_reg(obj, MMU_SYSCONFIG);
	l &= ~MMU_SYS_IDLE_MASK;
	l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
	iommu_write_reg(obj, l, MMU_SYSCONFIG);

	iommu_write_reg(obj, pa, MMU_TTB);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static void omap2_iommu_set_twl(struct iommu *obj, bool on)
{
	__iommu_set_twl(obj, false);
}

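/*
 * omap2_iommu_fault_isr - decode and acknowledge pending MMU faults
 * @obj:	target iommu
 * @ra:		faulting device address is returned here
 *
 * Returns the pending MMU_IRQSTATUS bits translated into the generic
 * OMAP_IOMMU_ERR_* flags, or 0 when no fault is pending.
 */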
static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
{
	u32 stat, da;
	u32 errs = 0;

	stat = iommu_read_reg(obj, MMU_IRQSTATUS);
	stat &= MMU_IRQ_MASK;
	if (!stat) {
		*ra = 0;
		return 0;
	}

	da = iommu_read_reg(obj, MMU_FAULT_AD);
	*ra = da;

	if (stat & MMU_IRQ_TLBMISS)
		errs |= OMAP_IOMMU_ERR_TLB_MISS;
	if (stat & MMU_IRQ_TRANSLATIONFAULT)
		errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
	if (stat & MMU_IRQ_EMUMISS)
		errs |= OMAP_IOMMU_ERR_EMU_MISS;
	if (stat & MMU_IRQ_TABLEWALKFAULT)
		errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
	if (stat & MMU_IRQ_MULTIHITFAULT)
		errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return errs;
}

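/*
 * A TLB entry is handled as a CAM/RAM register pair: omap2_tlb_read_cr
 * reads the currently selected entry back through MMU_READ_CAM/RAM,
 * while omap2_tlb_load_cr programs a new entry, forcing the valid bit.
 */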
static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);
}

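/* Extract the virtual address tag from CAM, masked by its page size */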
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

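/*
 * omap2_alloc_cr - build a CAM/RAM pair for a new TLB entry
 *
 * The device address must be aligned to the requested page size;
 * returns ERR_PTR(-EINVAL) on misalignment, ERR_PTR(-ENOMEM) if the
 * cr_regs allocation fails.
 */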
static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

static inline int omap2_cr_valid(struct cr_regs *cr)
{
	return cr->cam & MMU_CAM_V;
}

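/*
 * omap2_get_pte_attr - pack the mixed/endian/element size attributes into
 * page table entry bits, shifted up by 6 for section and supersection
 * (1MB/16MB) mappings.
 */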
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
{
	char *p = buf;

	/* FIXME: Need more detailed analysis of cam/ram */
	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
					(cr->cam & MMU_CAM_P) ? 1 : 0);

	return p - buf;
}

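/*
 * pr_reg - helper for omap2_iommu_dump_ctx: print one "name: value" line
 * and stop dumping once fewer than 32 bytes remain in the buffer.
 */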
#define pr_reg(name)							\
	do {								\
		ssize_t bytes;						\
		const char *str = "%20s: %08x\n";			\
		const int maxcol = 32;					\
		bytes = snprintf(p, maxcol, str, __stringify(name),	\
				 iommu_read_reg(obj, MMU_##name));	\
		p += bytes;						\
		len -= bytes;						\
		if (len < maxcol)					\
			goto out;					\
	} while (0)

static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len)
{
	char *p = buf;

	pr_reg(REVISION);
	pr_reg(SYSCONFIG);
	pr_reg(SYSSTATUS);
	pr_reg(IRQSTATUS);
	pr_reg(IRQENABLE);
	pr_reg(WALKING_ST);
	pr_reg(CNTL);
	pr_reg(FAULT_AD);
	pr_reg(TTB);
	pr_reg(LOCK);
	pr_reg(LD_TLB);
	pr_reg(CAM);
	pr_reg(RAM);
	pr_reg(GFLUSH);
	pr_reg(FLUSH_ENTRY);
	pr_reg(READ_CAM);
	pr_reg(READ_RAM);
	pr_reg(EMU_FAULT_AD);
out:
	return p - buf;
}

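/*
 * Context save/restore copies the whole MMU register file (MMU_REG_SIZE
 * bytes) to/from obj->ctx word by word; the value at offset 0
 * (MMU_REVISION) doubles as a sanity check on the saved image.
 */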
static void omap2_iommu_save_ctx(struct iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

static void omap2_iommu_restore_ctx(struct iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

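/* Decode a CAM/RAM register pair back into a generic iotlb_entry */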
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	e->da		= cr->cam & MMU_CAM_VATAG_MASK;
	e->pa		= cr->ram & MMU_RAM_PADDR_MASK;
	e->valid	= cr->cam & MMU_CAM_V;
	e->pgsz		= cr->cam & MMU_CAM_PGSZ_MASK;
	e->endian	= cr->ram & MMU_RAM_ENDIAN_MASK;
	e->elsz		= cr->ram & MMU_RAM_ELSZ_MASK;
	e->mixed	= cr->ram & MMU_RAM_MIXED;
}

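/*
 * omap2/3 specific callbacks, registered with the generic OMAP iommu
 * layer through install_iommu_arch() at module init.
 */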
static const struct iommu_functions omap2_iommu_ops = {
	.version	= IOMMU_ARCH_VERSION,

	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.set_twl	= omap2_iommu_set_twl,
	.fault_isr	= omap2_iommu_fault_isr,

	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,

	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.alloc_cr	= omap2_alloc_cr,
	.cr_valid	= omap2_cr_valid,
	.dump_cr	= omap2_dump_cr,

	.get_pte_attr	= omap2_get_pte_attr,

	.save_ctx	= omap2_iommu_save_ctx,
	.restore_ctx	= omap2_iommu_restore_ctx,
	.dump_ctx	= omap2_iommu_dump_ctx,
};

static int __init omap2_iommu_init(void)
{
	return install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);

static void __exit omap2_iommu_exit(void)
{
	uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);

MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");