iommu.c 83.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
2 3
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
J
Joerg Roedel 已提交
4
 * Author: Joerg Roedel <jroedel@suse.de>
5 6
 */

7
#define pr_fmt(fmt)    "iommu: " fmt
8

9
#include <linux/device.h>
10
#include <linux/kernel.h>
11
#include <linux/bits.h>
12 13
#include <linux/bug.h>
#include <linux/types.h>
14 15
#include <linux/init.h>
#include <linux/export.h>
16
#include <linux/slab.h>
17 18
#include <linux/errno.h>
#include <linux/iommu.h>
A
Alex Williamson 已提交
19 20 21
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
22
#include <linux/pci.h>
23
#include <linux/bitops.h>
R
Robin Murphy 已提交
24
#include <linux/property.h>
25
#include <linux/fsl/mc.h>
26
#include <linux/module.h>
27
#include <trace/events/iommu.h>
A
Alex Williamson 已提交
28 29

static struct kset *iommu_group_kset;
H
Heiner Kallweit 已提交
30
static DEFINE_IDA(iommu_group_ida);
31 32

static unsigned int iommu_def_domain_type __read_mostly;
33
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_STRICT);
34
static u32 iommu_cmd_line __read_mostly;
A
Alex Williamson 已提交
35 36 37 38 39 40 41 42 43 44 45

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
46
	struct iommu_domain *default_domain;
47
	struct iommu_domain *domain;
48
	struct list_head entry;
A
Alex Williamson 已提交
49 50
};

J
Joerg Roedel 已提交
51
struct group_device {
A
Alex Williamson 已提交
52 53 54 55 56 57 58 59 60 61 62 63
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

64
static const char * const iommu_group_resv_type_string[] = {
65 66 67 68 69
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
70 71
};

72
#define IOMMU_CMD_LINE_DMA_API		BIT(0)
73
#define IOMMU_CMD_LINE_STRICT		BIT(1)
74

75 76
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
77 78 79 80 81 82 83 84
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
85 86
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
87
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
88 89
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);
90

A
Alex Williamson 已提交
91 92 93
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)
94

A
Alex Williamson 已提交
95 96 97 98
#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)
99

100 101 102
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
125
	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
126 127 128 129
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);
130

131 132
		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
133 134
			iommu_set_default_translated(false);
		}
135 136 137 138
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
139 140
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");
141

142 143 144 145 146
	pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
		iommu_dma_strict ? "strict" : "lazy",
		(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
			"(set via kernel command line)" : "");

147 148 149 150
	return 0;
}
subsys_initcall(iommu_subsys_init);

151 152 153 154 155 156 157 158 159 160
/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops:   IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
161
{
162 163 164 165 166 167 168 169
	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = hwdev->fwnode;

170 171 172 173 174
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
175
EXPORT_SYMBOL_GPL(iommu_device_register);
176 177 178 179 180 181 182

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
183
EXPORT_SYMBOL_GPL(iommu_device_unregister);
184

185
static struct dev_iommu *dev_iommu_get(struct device *dev)
186
{
187
	struct dev_iommu *param = dev->iommu;
188 189 190 191 192 193 194 195 196

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
197
	dev->iommu = param;
198 199 200
	return param;
}

201
static void dev_iommu_free(struct device *dev)
202
{
203
	iommu_fwspec_free(dev);
204 205
	kfree(dev->iommu);
	dev->iommu = NULL;
206 207
}

208
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
209 210
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
211 212
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
213
	int ret;
214

215
	if (!ops)
216
		return -ENODEV;
217

218
	if (!dev_iommu_get(dev))
219
		return -ENOMEM;
220

221 222
	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
223
		goto err_free;
224 225
	}

226
	iommu_dev = ops->probe_device(dev);
227 228 229 230
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}
231 232 233 234

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
235
	if (IS_ERR(group)) {
236 237 238 239 240
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

241 242 243
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

244
	iommu_device_link(iommu_dev, dev);
245 246

	return 0;
247

248 249 250
out_release:
	ops->release_device(dev);

251
out_module_put:
252
	module_put(ops->owner);
253 254

err_free:
255
	dev_iommu_free(dev);
256

257
	return ret;
258 259
}

260
int iommu_probe_device(struct device *dev)
261 262
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
263 264
	struct iommu_group *group;
	int ret;
265

266 267 268 269
	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

270
	group = iommu_group_get(dev);
271 272
	if (!group) {
		ret = -ENODEV;
273
		goto err_release;
274
	}
275

276 277 278 279 280 281
	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
282
	iommu_alloc_default_domain(group, dev);
283

284
	if (group->default_domain) {
285
		ret = __iommu_attach_device(group->default_domain, dev);
286 287 288 289 290
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}
291

292 293
	iommu_create_device_direct_mappings(group, dev);

294 295 296 297 298 299 300 301 302
	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);
303

304 305
err_out:
	return ret;
306

307 308
}

309
void iommu_release_device(struct device *dev)
310 311
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
312

313 314
	if (!dev->iommu)
		return;
315 316 317 318

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops->release_device(dev);
319

320
	iommu_group_remove_device(dev);
321 322
	module_put(ops->owner);
	dev_iommu_free(dev);
323
}
324

325 326 327
static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
328
	int ret;
329

330 331 332
	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;
333

334 335 336 337
	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);
338

339 340 341 342
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

343 344
static int __init iommu_dma_setup(char *str)
{
345 346 347 348 349
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
350 351 352
}
early_param("iommu.strict", iommu_dma_setup);

353
void iommu_set_dma_strict(void)
354
{
355
	iommu_dma_strict = true;
356 357 358 359 360 361 362 363 364 365 366
}

bool iommu_get_dma_strict(struct iommu_domain *domain)
{
	/* only allow lazy flushing for DMA domains */
	if (domain->type == IOMMU_DOMAIN_DMA)
		return iommu_dma_strict;
	return true;
}
EXPORT_SYMBOL_GPL(iommu_get_dma_strict);

A
Alex Williamson 已提交
367 368
static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
369
{
A
Alex Williamson 已提交
370 371 372
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;
373

A
Alex Williamson 已提交
374 375 376 377 378 379 380 381 382 383 384 385
	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;
386

A
Alex Williamson 已提交
387 388 389
	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
390 391
}

A
Alex Williamson 已提交
392 393 394 395
static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};
396

A
Alex Williamson 已提交
397 398 399 400
static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
401 402
}

A
Alex Williamson 已提交
403 404 405 406 407 408 409 410 411 412 413
static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

E
Eric Auger 已提交
414 415 416 417 418 419
/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
420 421
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
E
Eric Auger 已提交
422
 */
W
Wei Yongjun 已提交
423 424
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
E
Eric Auger 已提交
425
{
426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

446 447
		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
E
Eric Auger 已提交
465
		} else {
466 467 468
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
E
Eric Auger 已提交
469 470
		}
	}
471
	list_splice(&stack, regions);
E
Eric Auger 已提交
472 473 474 475 476 477 478 479
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
480
	int ret = 0;
E
Eric Auger 已提交
481 482 483 484 485 486 487 488 489 490 491 492

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
493
	struct group_device *device;
E
Eric Auger 已提交
494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

534 535 536 537 538
static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

539
	mutex_lock(&group->mutex);
540 541 542 543 544 545 546 547 548 549 550 551
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
552
			type = "DMA\n";
553 554 555
			break;
		}
	}
556
	mutex_unlock(&group->mutex);
557 558 559 560 561
	strcpy(buf, type);

	return strlen(type);
}

A
Alex Williamson 已提交
562 563
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

564 565 566
static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

567 568
static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);
569

A
Alex Williamson 已提交
570 571 572 573
static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

574 575
	pr_debug("Releasing group %d\n", group->id);

A
Alex Williamson 已提交
576 577 578
	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

579
	ida_simple_remove(&iommu_group_ida, group->id);
A
Alex Williamson 已提交
580

581 582 583
	if (group->default_domain)
		iommu_domain_free(group->default_domain);

A
Alex Williamson 已提交
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604
	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
605
{
A
Alex Williamson 已提交
606 607 608 609 610 611 612 613 614 615
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
616
	INIT_LIST_HEAD(&group->entry);
A
Alex Williamson 已提交
617 618
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

619 620
	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
A
Alex Williamson 已提交
621
		kfree(group);
622
		return ERR_PTR(ret);
A
Alex Williamson 已提交
623
	}
624
	group->id = ret;
625

A
Alex Williamson 已提交
626 627 628
	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
629
		ida_simple_remove(&iommu_group_ida, group->id);
630
		kobject_put(&group->kobj);
A
Alex Williamson 已提交
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

647 648 649 650 651
	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

652 653 654 655
	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

656 657
	pr_debug("Allocated group %d\n", group->id);

A
Alex Williamson 已提交
658 659 660 661
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

A
Alex Williamson 已提交
691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716
/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
717
{
A
Alex Williamson 已提交
718 719 720 721
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
722

A
Alex Williamson 已提交
723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}
753 754 755

	return 0;
}
A
Alex Williamson 已提交
756
EXPORT_SYMBOL_GPL(iommu_group_set_name);
757

758 759
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
760 761
{
	struct iommu_domain *domain = group->default_domain;
762
	struct iommu_resv_region *entry;
763 764 765 766 767 768 769
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

770
	BUG_ON(!domain->pgsize_bitmap);
771

772
	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
773 774
	INIT_LIST_HEAD(&mappings);

775
	iommu_get_resv_regions(dev, &mappings);
776 777 778 779

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
780
		size_t map_size = 0;
781

782 783
		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);
784

785 786 787
		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

788 789
		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
790 791
			continue;

792
		for (addr = start; addr <= end; addr += pg_size) {
793 794
			phys_addr_t phys_addr;

795 796 797
			if (addr == end)
				goto map_end;

798
			phys_addr = iommu_iova_to_phys(domain, addr);
799 800
			if (!phys_addr) {
				map_size += pg_size;
801
				continue;
802
			}
803

804 805 806 807 808 809 810 811 812
map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
813 814 815 816
		}

	}

817
	iommu_flush_iotlb_all(domain);
818

819
out:
820
	iommu_put_resv_regions(dev, &mappings);
821 822 823 824

	return ret;
}

825 826 827 828 829 830 831 832 833
static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

A
Alex Williamson 已提交
834 835 836 837 838 839 840 841 842
/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
843
{
A
Alex Williamson 已提交
844
	int ret, i = 0;
J
Joerg Roedel 已提交
845
	struct group_device *device;
A
Alex Williamson 已提交
846 847 848 849 850 851

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;
852

A
Alex Williamson 已提交
853
	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
854 855
	if (ret)
		goto err_free_device;
A
Alex Williamson 已提交
856 857 858 859

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
860 861
		ret = -ENOMEM;
		goto err_remove_link;
A
Alex Williamson 已提交
862
	}
863

A
Alex Williamson 已提交
864 865 866 867 868 869 870 871
	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
872
			kfree(device->name);
A
Alex Williamson 已提交
873 874 875 876
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
877
		goto err_free_name;
A
Alex Williamson 已提交
878 879 880 881 882 883 884 885
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
886
	if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
887
		ret = __iommu_attach_device(group->domain, dev);
A
Alex Williamson 已提交
888
	mutex_unlock(&group->mutex);
889 890
	if (ret)
		goto err_put_group;
A
Alex Williamson 已提交
891 892 893 894

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
895 896

	trace_add_device_to_group(group->id, dev);
897

898
	dev_info(dev, "Adding to iommu group %d\n", group->id);
899

900
	return 0;
901 902 903 904 905 906 907

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
908
	sysfs_remove_link(group->devices_kobj, device->name);
909 910 911 912 913 914
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
915
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
916
	return ret;
917
}
A
Alex Williamson 已提交
918
EXPORT_SYMBOL_GPL(iommu_group_add_device);
919

A
Alex Williamson 已提交
920 921 922 923 924 925 926 927 928 929
/**
 * iommu_group_remove_device - remove a device from it's current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * it's current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
J
Joerg Roedel 已提交
930
	struct group_device *tmp_device, *device = NULL;
A
Alex Williamson 已提交
931

932
	dev_info(dev, "Removing from iommu group %d\n", group->id);
933

A
Alex Williamson 已提交
934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953
	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

954 955
	trace_remove_device_from_group(group->id, dev);

A
Alex Williamson 已提交
956 957 958 959 960 961 962
	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

963 964
static int iommu_group_device_count(struct iommu_group *group)
{
J
Joerg Roedel 已提交
965
	struct group_device *entry;
966 967 968 969 970 971 972 973
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

A
Alex Williamson 已提交
974 975 976 977 978 979 980 981 982 983 984
/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
985 986
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
A
Alex Williamson 已提交
987
{
J
Joerg Roedel 已提交
988
	struct group_device *device;
A
Alex Williamson 已提交
989 990 991 992 993 994 995
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
996 997 998 999 1000 1001 1002 1003 1004 1005 1006
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
A
Alex Williamson 已提交
1007
	mutex_unlock(&group->mutex);
1008

A
Alex Williamson 已提交
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference in incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
1044
EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1045

A
Alex Williamson 已提交
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089
/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

1090 1091 1092 1093 1094 1095 1096
/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
1097 1098 1099 1100 1101 1102 1103 1104
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
1105 1106 1107 1108 1109 1110 1111
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
1112
	struct dev_iommu *param = dev->iommu;
1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
1134 1135
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);
1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
1155
	struct dev_iommu *param = dev->iommu;
1156 1157 1158 1159 1160 1161 1162 1163 1164 1165
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

1166 1167 1168 1169 1170 1171
	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187
	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1188 1189
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
1190 1191 1192 1193 1194
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
1195
	struct dev_iommu *param = dev->iommu;
1196
	struct iommu_fault_event *evt_pending = NULL;
1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}
1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

1224
	ret = fparam->handler(&evt->fault, fparam->data);
1225 1226 1227 1228 1229 1230
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
1231 1232 1233 1234 1235 1236
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

1237 1238 1239
int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
1240
	bool needs_pasid;
1241 1242 1243
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
1244
	struct dev_iommu *param = dev->iommu;
1245
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
1270 1271
		if (prm->grpid != msg->grpid)
			continue;
1272

1273 1274 1275 1276 1277 1278 1279 1280
		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
1281 1282
			continue;

1283 1284 1285 1286 1287
		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}
1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

A
Alex Williamson 已提交
1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
1312

1313 1314 1315
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

1316 1317 1318 1319 1320 1321 1322 1323 1324 1325
/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357
/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
1358 1359
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383
 * space is quite small (especially since we're really only looking at pcie
 * device, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
1384
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420
struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

1421 1422 1423 1424 1425 1426
/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
1427
	return iommu_group_alloc();
1428
}
1429
EXPORT_SYMBOL_GPL(generic_device_group);
1430

1431 1432 1433 1434
/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
1435
struct iommu_group *pci_device_group(struct device *dev)
1436
{
1437
	struct pci_dev *pdev = to_pci_dev(dev);
1438 1439 1440
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
1441
	u64 devfns[4] = { 0 };
1442

1443 1444 1445
	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477
	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
1478 1479
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
1480
	 */
1481 1482 1483
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;
1484 1485

	/*
1486 1487 1488
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those funcions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
1489
	 */
1490 1491 1492
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;
1493 1494

	/* No shared group found, allocate new */
1495
	return iommu_group_alloc();
1496
}
1497
EXPORT_SYMBOL_GPL(pci_device_group);
1498

1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509
/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
1510
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1511

1512 1513 1514
static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
1515 1516 1517

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;
1518 1519

	if (ops->def_domain_type)
1520
		return ops->def_domain_type(dev);
1521

1522
	return 0;
1523 1524
}

1525 1526 1527
static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
1528 1529 1530
{
	struct iommu_domain *dom;

1531
	dom = __iommu_domain_alloc(bus, type);
1532
	if (!dom && type != IOMMU_DOMAIN_DMA) {
1533 1534 1535 1536
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
				type, group->name);
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

1548 1549
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
1550 1551 1552 1553 1554 1555
{
	unsigned int type;

	if (group->default_domain)
		return 0;

1556
	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1557 1558 1559 1560

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

1561 1562 1563 1564 1565 1566 1567 1568 1569 1570
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
1571
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1572
{
1573
	const struct iommu_ops *ops = dev->bus->iommu_ops;
1574
	struct iommu_group *group;
1575 1576 1577 1578 1579 1580
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

1581 1582
	if (!ops)
		return ERR_PTR(-EINVAL);
1583

1584
	group = ops->device_group(dev);
1585 1586 1587
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

1588 1589 1590 1591
	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
1592 1593
	if (ret)
		goto out_put_group;
1594 1595

	return group;
1596 1597 1598 1599 1600

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
1601 1602
}

1603 1604 1605 1606 1607
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

1608
static int probe_iommu_group(struct device *dev, void *data)
1609
{
1610
	struct list_head *group_list = data;
1611
	struct iommu_group *group;
1612
	int ret;
1613

1614 1615 1616 1617 1618 1619 1620
	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

1621
	ret = __iommu_probe_device(dev, group_list);
1622 1623 1624 1625
	if (ret == -ENODEV)
		ret = 0;

	return ret;
1626 1627
}

1628 1629
static int remove_iommu_group(struct device *dev, void *data)
{
1630
	iommu_release_device(dev);
1631 1632 1633 1634

	return 0;
}

A
Alex Williamson 已提交
1635 1636
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
1637
{
1638
	unsigned long group_action = 0;
1639
	struct device *dev = data;
A
Alex Williamson 已提交
1640 1641 1642 1643 1644 1645 1646
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
1647
		int ret;
1648

1649 1650
		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1651
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1652 1653
		iommu_release_device(dev);
		return NOTIFY_OK;
A
Alex Williamson 已提交
1654
	}
1655

A
Alex Williamson 已提交
1656 1657 1658 1659 1660 1661 1662
	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;
1663

A
Alex Williamson 已提交
1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677
	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}
1678

A
Alex Williamson 已提交
1679 1680 1681
	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);
1682

A
Alex Williamson 已提交
1683
	iommu_group_put(group);
1684 1685 1686
	return 0;
}

1687 1688 1689 1690 1691 1692 1693 1694
struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
1695
	unsigned int type = iommu_get_def_domain_type(dev);
1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
1730

1731 1732 1733 1734 1735
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
1736 1737 1738 1739
	int ret = 0;

	if (!iommu_is_attach_deferred(domain, dev))
		ret = __iommu_attach_device(domain, dev);
1740

1741
	return ret;
1742 1743 1744 1745 1746 1747 1748 1749
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	if (domain->ops->probe_finalize)
		domain->ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}
1765

1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780
static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

1781
int bus_iommu_probe(struct bus_type *bus)
1782
{
1783 1784
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
1785 1786
	int ret;

1787 1788 1789 1790 1791 1792 1793 1794
	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;
1795

1796 1797 1798
	list_for_each_entry_safe(group, next, &group_list, entry) {
		/* Remove item from the list */
		list_del_init(&group->entry);
1799

1800
		mutex_lock(&group->mutex);
1801

1802 1803
		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);
1804

1805 1806 1807 1808
		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}
1809

1810
		iommu_group_create_direct_mappings(group);
1811

1812
		ret = __iommu_group_dma_attach(group);
1813

1814
		mutex_unlock(&group->mutex);
1815

1816 1817
		if (ret)
			break;
1818 1819

		__iommu_group_dma_finalize(group);
1820 1821 1822 1823 1824
	}

	return ret;
}

M
Mark Salter 已提交
1825
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1826
{
M
Mark Salter 已提交
1827
	struct notifier_block *nb;
1828
	int err;
1829

M
Mark Salter 已提交
1830 1831 1832 1833 1834 1835 1836
	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
1837 1838
	if (err)
		goto out_free;
1839

1840
	err = bus_iommu_probe(bus);
1841 1842 1843
	if (err)
		goto out_err;

1844 1845

	return 0;
1846 1847 1848

out_err:
	/* Clean up */
L
Lu Baolu 已提交
1849
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1850 1851 1852 1853 1854 1855
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
1856
}
1857

1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870
/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
1871
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (ops == NULL) {
		bus->iommu_ops = NULL;
		return 0;
	}

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
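
/*
 * Illustrative sketch (not taken from this file; the "my_iommu_*" names and
 * the choice of bus are hypothetical): an IOMMU driver typically registers
 * its ops once at init time, before devices on that bus are probed:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_iommu_capable,
 *		.domain_alloc	= my_iommu_domain_alloc,
 *		.probe_device	= my_iommu_probe_device,
 *		.release_device	= my_iommu_release_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 *	}
 */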

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
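
/*
 * Illustrative sketch (hypothetical consumer, not taken from this file): an
 * owner of an unmanaged domain can install a handler that report_iommu_fault()
 * will invoke whenever the low-level driver reports a fault:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at %#lx, flags %#x\n", iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_data);
 *
 * Returning -ENOSYS elicits the default behaviour of the IOMMU driver;
 * "my_fault_handler" and "my_data" are placeholders.
 */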

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
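
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * driver that wants to manage its own translations allocates an unmanaged
 * domain and attaches the device to it; this only succeeds while the device
 * is the sole member of its group:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 *	... use iommu_map()/iommu_unmap() on the domain ...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 *
 * Multi-device groups must use iommu_attach_group() instead.
 */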

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
		return __iommu_attach_device(domain, dev);

	return 0;
}

/*
 * Check flags and other user provided data for valid combinations. We also
 * make sure no reserved fields or unused flags are set. This is to ensure
 * we don't break userspace in the future when these fields or flags are used.
 */
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
	u32 mask;
	int i;

	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
		return -EINVAL;

	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
	if (info->cache & ~mask)
		return -EINVAL;

	if (info->granularity >= IOMMU_INV_GRANU_NR)
		return -EINVAL;

	switch (info->granularity) {
	case IOMMU_INV_GRANU_ADDR:
		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
			return -EINVAL;

		mask = IOMMU_INV_ADDR_FLAGS_PASID |
			IOMMU_INV_ADDR_FLAGS_ARCHID |
			IOMMU_INV_ADDR_FLAGS_LEAF;

		if (info->granu.addr_info.flags & ~mask)
			return -EINVAL;
		break;
	case IOMMU_INV_GRANU_PASID:
		mask = IOMMU_INV_PASID_FLAGS_PASID |
			IOMMU_INV_PASID_FLAGS_ARCHID;
		if (info->granu.pasid_info.flags & ~mask)
			return -EINVAL;

		break;
	case IOMMU_INV_GRANU_DOMAIN:
		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(info->padding); i++) {
		if (info->padding[i])
			return -EINVAL;
	}

	return 0;
}

int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
				void __user *uinfo)
{
	struct iommu_cache_invalidate_info inv_info = { 0 };
	u32 minsz;
	int ret;

	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_cache_invalidate_info, granu);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(&inv_info, uinfo, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (inv_info.argsz < minsz)
		return -EINVAL;

	/* PASID and address granu require additional info beyond minsz */
	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
		return -EINVAL;

	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
		return -EINVAL;

	/*
	 * User might be using a newer UAPI header which has a larger data
	 * size, we shall support the existing flags within the current
	 * size. Copy the remaining user data _after_ minsz but not more
	 * than the current kernel supported size.
	 */
	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
		return -EFAULT;

	/* Now the argsz is validated, check the content */
	ret = iommu_check_cache_invl_data(&inv_info);
	if (ret)
		return ret;

	return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);

static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
	u64 mask;
	int i;

	if (data->version != IOMMU_GPASID_BIND_VERSION_1)
		return -EINVAL;

	/* Check the range of supported formats */
	if (data->format >= IOMMU_PASID_FORMAT_LAST)
		return -EINVAL;

	/* Check all flags */
	mask = IOMMU_SVA_GPASID_VAL;
	if (data->flags & ~mask)
		return -EINVAL;

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(data->padding); i++) {
		if (data->padding[i])
			return -EINVAL;
	}

	return 0;
}

static int iommu_sva_prepare_bind_data(void __user *udata,
				       struct iommu_gpasid_bind_data *data)
{
	u32 minsz;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_gpasid_bind_data, vendor);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(data, udata, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (data->argsz < minsz)
		return -EINVAL;
	/*
	 * User might be using a newer UAPI header, we shall let IOMMU vendor
	 * driver decide on what size it needs. Since the guest PASID bind data
	 * can be vendor specific, larger argsz could be the result of extension
	 * for one vendor but it should not affect another vendor.
	 * Copy the remaining user data _after_ minsz
	 */
	if (copy_from_user((void *)data + minsz, udata + minsz,
			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
		return -EFAULT;

	return iommu_check_bind_data(data);
}

int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			       void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid)
{
	if (unlikely(!domain->ops->sva_unbind_gpasid))
		return -ENODEV;

	return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
				 void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations, which already provide their own
 * guarantee that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest support page size, if it exists */
	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}
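
/*
 * Worked example (illustrative): with a pgsize_bitmap of SZ_4K | SZ_2M,
 * mapping iova 0x201000 -> paddr 0x601000 with size 0x400000 first yields
 * pgsize = SZ_4K and count = 0x1ff, i.e. 4K mappings up to the next 2M
 * boundary, because neither address is 2M-aligned yet.  Both addresses are
 * equally offset within a 2M page, so once that boundary is reached the
 * following __iommu_map_pages() call can switch to 2M mappings.
 */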

static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);

	if (ops->map_pages) {
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, mapped);
	} else {
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
		*mapped = ret ? 0 : pgsize;
	}

	return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(ops->map || ops->map_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t mapped = 0;

		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
					&mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;

		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	int ret;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	might_sleep();
	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);
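
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): with
 * a device attached to an unmanaged domain, a physically contiguous buffer
 * can be mapped at a caller-chosen, suitably aligned IOVA:
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *
 * iova, paddr and size must be aligned to a page size supported by the
 * domain (see domain->pgsize_bitmap); use iommu_map_atomic() from atomic
 * context.
 */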

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents, int prot,
			     gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;

}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
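
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * iommu_map_sg() maps a whole scatterlist at consecutive IOVAs and returns
 * the number of bytes mapped, or 0 on failure (in which case nothing is
 * left mapped), so callers compare the result with the expected length:
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < size)
 *		return -ENOMEM;
 *
 * Physically contiguous entries are merged so the hardware can use larger
 * page sizes where possible.
 */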

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
						domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
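
/*
 * Illustrative sketch (hypothetical IOMMU driver, not taken from this file):
 * a low-level driver typically reports faults from its IRQ handler and only
 * falls back to its own logging when nobody handled the fault:
 *
 *	if (report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ))
 *		dev_err_ratelimited(dev, "unhandled fault at %#lx\n", iova);
 */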

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);
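
/*
 * Illustrative sketch (hypothetical driver, not taken from this file): a
 * driver whose .get_resv_regions() only allocates regions through
 * iommu_alloc_resv_region() can use the generic helper above as its
 * .put_resv_regions() callback:
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
 *						 IOMMU_WRITE, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 *
 *	.get_resv_regions	= my_iommu_get_resv_regions,
 *	.put_resv_regions	= generic_iommu_put_resv_regions,
 *
 * MY_MSI_BASE and MY_MSI_SIZE stand in for a hardware doorbell range.
 */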

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
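
/*
 * Illustrative sketch (hypothetical firmware glue, not taken from this file):
 * DT/ACPI code describes a master to its IOMMU by initialising the fwspec
 * and appending the IDs (e.g. stream IDs) it parsed:
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *
 * "iommu_fwnode", "ops" and "sid" come from the firmware parsing code.
 */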

/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_feat_enabled)
			return ops->dev_feat_enabled(dev, feat);
	}

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
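
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * driver handing an aux domain to a mediated device would typically do:
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
 *	if (ret)
 *		return ret;
 *	ret = iommu_aux_attach_device(domain, dev);
 *	if (ret)
 *		return ret;
 *	pasid = iommu_aux_get_pasid(domain, dev);
 *	...
 *	iommu_aux_detach_device(domain, dev);
 */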

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @device and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
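
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * device driver using shared virtual addressing typically does:
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program the PASID into the device and issue DMA ...
 *	iommu_sva_unbind_device(handle);
 */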

/*
 * Changes the default domain of an iommu group that has *only* one device
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the device
 *	 hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the group
 *
 * Returns 0 on success and error code on failure
 *
 * Note:
 * 1. Presently, this function is called only when user requests to change the
 *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
 *    Please take a closer look if intended to use for other purposes.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
				       struct device *prev_dev, int type)
{
	struct iommu_domain *prev_dom;
	struct group_device *grp_dev;
	int ret, dev_def_dom;
	struct device *dev;

	mutex_lock(&group->mutex);

	if (group->default_domain != group->domain) {
		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * iommu group wasn't locked while acquiring device lock in
	 * iommu_group_store_type(). So, make sure that the device count hasn't
	 * changed while acquiring device lock.
	 *
	 * Changing default domain of an iommu group with two or more devices
	 * isn't supported because there could be a potential deadlock. Consider
	 * the following scenario. T1 is trying to acquire device locks of all
	 * the devices in the group and before it could acquire all of them,
	 * there could be another thread T2 (from different sub-system and use
	 * case) that has already acquired some of the device locks and might be
	 * waiting for T1 to release other device locks.
	 */
	if (iommu_group_device_count(group) != 1) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;

	if (prev_dev != dev) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
		ret = -EBUSY;
		goto out;
	}

	prev_dom = group->default_domain;
	if (!prev_dom) {
		ret = -EINVAL;
		goto out;
	}

	dev_def_dom = iommu_get_def_domain_type(dev);
	if (!type) {
		/*
		 * If the user hasn't requested any specific type of domain and
		 * if the device supports both the domains, then default to the
		 * domain the device was booted with
		 */
		type = dev_def_dom ? : iommu_def_domain_type;
	} else if (dev_def_dom && type != dev_def_dom) {
		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
				    iommu_domain_type_str(type));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Switch to a new domain only if the requested domain type is different
	 * from the existing default domain type
	 */
	if (prev_dom->type == type) {
		ret = 0;
		goto out;
	}

	/* Sets group->default_domain to the newly allocated domain */
	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
	if (ret)
		goto out;

	ret = iommu_create_device_direct_mappings(group, dev);
	if (ret)
		goto free_new_domain;

	ret = __iommu_attach_device(group->default_domain, dev);
	if (ret)
		goto free_new_domain;

	group->domain = group->default_domain;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in-turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	iommu_group_do_probe_finalize(dev, group->default_domain);
	iommu_domain_free(prev_dom);
	return 0;

free_new_domain:
	iommu_domain_free(group->default_domain);
	group->default_domain = prev_dom;
	group->domain = prev_dom;

out:
	mutex_unlock(&group->mutex);

	return ret;
}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group. Return failure if this
 * precondition is not met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *grp_dev;
	struct device *dev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group))
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	/*
	 * Lock/Unlock the group mutex here before device lock to
	 * 1. Make sure that the iommu group has only one device (this is a
	 *    prerequisite for step 2)
	 * 2. Get struct *dev which is needed to lock device
	 */
	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		mutex_unlock(&group->mutex);
		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
		return -EINVAL;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;
	get_device(dev);

	/*
	 * Don't hold the group mutex because taking group mutex first and then
	 * the device lock could potentially cause a deadlock as below. Assume
	 * two threads T1 and T2. T1 is trying to change default domain of an
	 * iommu group and T2 is trying to hot unplug a device or release [1] VF
	 * of a PCIe device which is in the same iommu group. T1 takes group
	 * mutex and before it could take device lock assume T2 has taken device
	 * lock and is yet to take group mutex. Now, both the threads will be
 * waiting for the other thread to release the lock. The suggested lock
 * order is:
	 * device_lock(dev);
	 *	mutex_lock(&group->mutex);
	 *		iommu_change_dev_def_domain();
	 *	mutex_unlock(&group->mutex);
	 * device_unlock(dev);
	 *
	 * [1] Typical device release path
	 * device_lock() from device/driver core code
	 *  -> bus_notifier()
	 *   -> iommu_bus_notifier()
	 *    -> iommu_release_device()
	 *     -> ops->release_device() vendor driver calls back iommu core code
	 *      -> mutex_lock() from iommu core code
	 */
	mutex_unlock(&group->mutex);

	/* Check if the device in the group still has a driver bound to it */
	device_lock(dev);
	if (device_is_bound(dev)) {
		pr_err_ratelimited("Device is still bound to driver\n");
		ret = -EBUSY;
		goto out;
	}

	ret = iommu_change_dev_def_domain(group, dev, req_type);
	ret = ret ?: count;

out:
	device_unlock(dev);
	put_device(dev);

	return ret;
}