/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static ssize_t show_iommu_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid))
		return 0;

	return sprintf(buf, "%u\n", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
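
/*
 * With the attribute registered above, the group id shows up as a plain
 * decimal value in the device's sysfs directory.  An illustrative read,
 * with a hypothetical device path and id:
 *
 *	$ cat /sys/bus/pci/devices/0000:00:19.0/iommu_group
 *	5
 */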

static int add_iommu_group(struct device *dev, void *data)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		return device_create_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int remove_iommu_group(struct device *dev)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		device_remove_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		return add_iommu_group(dev, NULL);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		return remove_iommu_group(dev);

	return 0;
}

static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus to set the iommu-callbacks for
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
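
/*
 * Example usage, as an illustrative sketch only: an IOMMU driver fills
 * a struct iommu_ops and registers it for the bus type it manages once
 * its own hardware has been probed.  "my_iommu_ops" and its callbacks
 * are hypothetical names, not definitions from this file:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	err = bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *
 * pgsize_bitmap has one bit set per supported page size; here the
 * imaginary hardware supports 4K and 2M pages.
 */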

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler)
{
	BUG_ON(!domain);

	domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
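
/*
 * Example, sketched with a hypothetical handler: the handler receives
 * the faulting domain and device together with the fault address and
 * flags, and returns 0 only if it really handled the fault:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;	(not handled; the driver reports it)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler);
 */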

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
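
/*
 * Typical call sequence, sketched for a hypothetical device "dev" on a
 * bus whose IOMMU driver has already called bus_set_iommu():
 *
 *	struct iommu_domain *domain;
 *	int err;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENODEV;
 *
 *	err = iommu_attach_device(domain, dev);
 *	if (err) {
 *		iommu_domain_free(domain);
 *		return err;
 *	}
 *
 *	(... iommu_map()/iommu_unmap() on the domain, then tear down:)
 *
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */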

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
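
/*
 * Worked example of the splitting loop above (hypothetical values,
 * assuming a pgsize_bitmap of SZ_4K | SZ_2M and 2M-aligned iova and
 * paddr): a request for SZ_2M + SZ_4K bytes is satisfied by one 2M
 * mapping followed by one 4K mapping:
 *
 *	err = iommu_map(domain, 0x20000000, paddr, SZ_2M + SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *
 * On failure any partial mapping has already been unrolled, so the
 * caller only needs to check 'err'.
 */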

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
					iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
							(unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
					(unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
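
/*
 * Example (sketch): since the driver may have backed the region with a
 * larger page, the number of bytes actually unmapped can exceed the
 * requested size, so callers should rely on the return value rather
 * than assume exactly 'size' bytes were released:
 *
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 *	if (unmapped < SZ_4K)
 *		(nothing was mapped at iova)
 */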

int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);