/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;

	if (!str || strtobool(str, &pt))
		return -EINVAL;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
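
/*
 * Example: booting with "iommu.passthrough=1" on the kernel command line
 * makes IOMMU_DOMAIN_IDENTITY the default domain type, so device DMA
 * bypasses translation; "iommu.passthrough=0" keeps the IOMMU_DOMAIN_DMA
 * default.
 */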

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}
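
/*
 * Worked example (editor's sketch): inserting a direct region covering
 * [0x1000, 0x2fff] into a list that already holds a direct region
 * [0x2000, 0x3fff] overlaps a same-type region, so the two are merged
 * into one direct region [0x1000, 0x3fff].  Had the existing region been
 * of a different type (e.g. MSI), both would be kept and the new region
 * inserted in start-address order.
 */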

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 * @name: Optional name to associate with group, visible in sysfs
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
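
/*
 * Example usage (sketch, not taken from an in-tree driver): an IOMMU
 * driver typically allocates the group, adds the device and then drops
 * the extra reference from its ->add_device() callback:
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 *	if (ret)
 *		return ret;
 */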

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
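
/*
 * Example (sketch; the mydrv_* names are hypothetical): a driver that
 * hangs per-group state off the group and lets the core free it when
 * the group is released:
 *
 *	struct mydrv_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	iommu_group_set_iommudata(group, data, mydrv_release_group_data);
 *
 * where mydrv_release_group_data() has the void (*)(void *iommu_data)
 * signature and frees the allocation.
 */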
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
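
/*
 * Example (sketch): a consumer counting the devices in a group; the
 * walk stops early if the callback returns a non-zero value:
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */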

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dev_warn(dev,
				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
				 iommu_def_domain_type);
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device) {
			int ret;

			ret = ops->add_device(dev);
			return (ret) ? NOTIFY_DONE : NOTIFY_OK;
		}
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}
	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;
	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}
	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
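
/*
 * Example (sketch; my_iommu_ops and the my_* callbacks are hypothetical):
 * an IOMMU driver registers its callbacks once at init time, before
 * devices on the bus are probed:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.add_device	= my_add_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */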

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
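
/*
 * Example (sketch): a handler that only logs faults and then lets the
 * IOMMU driver apply its default behaviour by returning -ENOSYS:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n",
 *			iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */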

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
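
/*
 * Example lifecycle (sketch): an unmanaged-domain user typically does
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 *
 * and must expect -EINVAL from the attach if the device's group holds
 * more than one device; multi-device groups require the group-based
 * attach below.
 */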

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
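
/*
 * Worked example (editor's sketch): with pgsize_bitmap = SZ_4K | SZ_2M |
 * SZ_1G, iova = paddr = 0x200000 and size = 0x400000, addr_merge is
 * 0x200000, so pgsize_idx = min(__fls(0x400000), __ffs(0x200000)) = 21.
 * Masking the bitmap with (1UL << 22) - 1 leaves 4K and 2M, and the
 * biggest remaining size, 2M, is picked for this iteration.
 */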

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
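
/*
 * Example (sketch): mapping 16K of physically contiguous memory for
 * device DMA and tearing it down again; iova, paddr and size must all
 * be aligned to the domain's minimum supported page size:
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_16K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unmap(domain, iova, SZ_16K);
 */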

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;

}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
						domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
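
/*
 * Example (sketch): a low-level IOMMU driver would typically call this
 * from its fault interrupt handler:
 *
 *	if (report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ))
 *		dev_err_ratelimited(dev, "unhandled fault at 0x%lx\n",
 *				    iova);
 */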

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;
		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;
		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
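
/*
 * Example (sketch; base, size and prot stand in for driver-specific
 * values): a driver's ->get_resv_regions() callback reports a region by
 * allocating it and appending it to the caller's list:
 *
 *	struct iommu_resv_region *region;
 *
 *	region = iommu_alloc_resv_region(base, size, prot,
 *					 IOMMU_RESV_DIRECT);
 *	if (!region)
 *		return;
 *	list_add_tail(&region->list, list);
 */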

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev->iommu_fwspec = fwspec;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
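
/*
 * Example flow (sketch): firmware glue code (e.g. the OF or ACPI layer)
 * ties a device to its IOMMU instance and records its stream IDs
 * roughly as follows, where sid comes from the firmware tables:
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *	if (ret)
 *		iommu_fwspec_free(dev);
 */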