/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))

/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_END		(0xfeefffff)
#define HT_RANGE_START		(0xfd00000000ULL)
#define HT_RANGE_END		(0xffffffffffULL)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
static const struct dma_map_ops amd_iommu_dma_ops;
/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	struct list_head list;		  /* For domain->dev_list */
	struct list_head dev_data_list;	  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	u16 devid;			  /* PCI Device ID */
	u16 alias;			  /* Alias Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	bool passthrough;		  /* Device is identity mapped */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
#define FLUSH_QUEUE_SIZE 256

struct flush_queue_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	u64 counter; /* Flush counter when this entry was added to the queue */
};

struct flush_queue {
	struct flush_queue_entry *entries;
	unsigned head, tail;
	spinlock_t lock;
};

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	/* generic protection domain information */
	struct protection_domain domain;

	/* IOVA RB-Tree */
	struct iova_domain iovad;

	struct flush_queue __percpu *flush_queue;

	/*
	 * We need two counters here to be race-free wrt. IOTLB flushing and
	 * adding entries to the flush queue.
	 *
	 * The flush_start_cnt is incremented _before_ the IOTLB flush starts.
	 * New entries added to the flush ring-buffer get their 'counter' value
	 * from here. This way we can make sure that entries added to the queue
	 * (or other per-cpu queues of the same domain) while the TLB is about
	 * to be flushed are not considered to be flushed already.
	 */
	atomic64_t flush_start_cnt;

	/*
	 * The flush_finish_cnt is incremented when an IOTLB flush is complete.
	 * This value is never larger than flush_start_cnt. The queue_add
	 * function frees all IOVAs that have a counter value smaller than
	 * flush_finish_cnt. This makes sure that we only free IOVAs that are
	 * flushed out of the IOTLB of the domain.
	 */
	atomic64_t flush_finish_cnt;

	/*
	 * Timer to make sure we don't keep IOVAs around unflushed
	 * for too long
	 */
	struct timer_list flush_timer;
	atomic_t flush_timer_on;
};

static struct iova_domain reserved_iova_ranges;
static struct lock_class_key reserved_rbtree_key;

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

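/*
 * Returns 0 if the ACPI HID (and UID, when present) of @dev matches
 * @entry, non-zero otherwise (strcmp-like semantics).
 */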
static inline int match_hid_uid(struct device *dev,
				struct acpihid_map_entry *entry)
{
	const char *hid, *uid;

	hid = acpi_device_hid(ACPI_COMPANION(dev));
	uid = acpi_device_uid(ACPI_COMPANION(dev));

	if (!hid || !(*hid))
		return -ENODEV;

	if (!uid || !(*uid))
		return strcmp(hid, entry->hid);

	if (!(*entry->uid))
		return strcmp(hid, entry->hid);

	return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
}

static inline u16 get_pci_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return PCI_DEVID(pdev->bus->number, pdev->devfn);
}

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{
	struct acpihid_map_entry *p;

	list_for_each_entry(p, &acpihid_map, list) {
		if (!match_hid_uid(dev, p)) {
			if (entry)
				*entry = p;
			return p->devid;
		}
	}
	return -EINVAL;
}

static inline int get_device_id(struct device *dev)
{
	int devid;

	if (dev_is_pci(dev))
		devid = get_pci_device_id(dev);
	else
		devid = get_acpihid_device_id(dev, NULL);

	return devid;
}

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
	return container_of(dom, struct protection_domain, domain);
}

static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
{
	BUG_ON(domain->flags != PD_DMA_OPS_MASK);
	return container_of(domain, struct dma_ops_domain, domain);
}

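/* Allocate an iommu_dev_data structure for @devid and add it to the global list */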
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	ratelimit_default_init(&dev_data->rs);

	return dev_data;
}

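/* Look up the iommu_dev_data structure for @devid in the global list */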
static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	*(u16 *)data = alias;
	return 0;
}

static u16 get_alias(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 devid, ivrs_alias, pci_alias;

	/* The callers make sure that get_device_id() does not fail here */
	devid = get_device_id(dev);
	ivrs_alias = amd_iommu_alias_table[devid];
	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);

	if (ivrs_alias == pci_alias)
		return ivrs_alias;

	/*
	 * DMA alias showdown
	 *
	 * The IVRS is fairly reliable in telling us about aliases, but it
	 * can't know about every screwy device.  If we don't have an IVRS
	 * reported alias, use the PCI reported alias.  In that case we may
	 * still need to initialize the rlookup and dev_table entries if the
	 * alias is to a non-existent device.
	 */
	if (ivrs_alias == devid) {
		if (!amd_iommu_rlookup_table[pci_alias]) {
			amd_iommu_rlookup_table[pci_alias] =
				amd_iommu_rlookup_table[devid];
			memcpy(amd_iommu_dev_table[pci_alias].data,
			       amd_iommu_dev_table[devid].data,
			       sizeof(amd_iommu_dev_table[pci_alias].data));
		}

		return pci_alias;
	}

	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
		"for device %s[%04x:%04x], kernel reported alias "
		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
		PCI_FUNC(pci_alias));

	/*
	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
	 * bus, then the IVRS table may know about a quirk that we don't.
	 */
	if (pci_alias == devid &&
	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
			dev_name(dev));
	}

	return ivrs_alias;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
	struct acpihid_map_entry *p, *entry = NULL;
	int devid;

	devid = get_acpihid_device_id(dev, &entry);
	if (devid < 0)
		return ERR_PTR(devid);

	list_for_each_entry(p, &acpihid_map, list) {
		if ((devid == p->devid) && p->group)
			entry->group = p->group;
	}

	if (!entry->group)
		entry->group = generic_device_group(dev);
	else
		iommu_group_ref_get(entry->group);

	return entry->group;
}

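/* Check whether a PCI device exposes the ATS, PRI and PASID capabilities */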
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_EXT_CAP_ID_PRI,
		PCI_EXT_CAP_ID_PASID,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = get_dev_data(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	int devid;

	if (!dev || !dev->dma_mask)
		return false;

	devid = get_device_id(dev);
	if (devid < 0)
		return false;

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static void init_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return;

	iommu_group_put(group);
}

static int iommu_init_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int devid;

	if (dev->archdata.iommu)
		return 0;

	devid = get_device_id(dev);
	if (devid < 0)
		return devid;

	iommu = amd_iommu_rlookup_table[devid];

	dev_data = find_dev_data(devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->alias = get_alias(dev);

	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
		struct amd_iommu *iommu;

		iommu = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	iommu_device_link(&iommu->iommu, dev);
	return 0;
}

static void iommu_ignore_device(struct device *dev)
{
	u16 alias;
	int devid;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	alias = get_alias(dev);

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int devid;
	devid = get_device_id(dev);
	if (devid < 0)
		return;
	iommu = amd_iommu_rlookup_table[devid];

	dev_data = search_dev_data(devid);
	if (!dev_data)
		return;

	if (dev_data->domain)
		detach_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);

	/* Remove dma-ops */
	dev->dma_ops = NULL;
	/*
	 * We keep dev_data around for unplugged devices and reuse it when the
	 * device is re-plugged - not doing so would introduce a ton of races.
	 */
}
/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff);
	if (pdev)
		dev_data = get_dev_data(&pdev->dev);

	if (dev_data && __ratelimit(&dev_data->rs)) {
		dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n",
			domain_id, address, flags);
	} else if (printk_ratelimit()) {
		pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			domain_id, address, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	int type, devid, domid, flags;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("AMD-Vi: No event written to event log\n");
			return;
		}
		udelay(1);
		goto retry;
	}
	if (type == EVENT_TYPE_IO_FAULT) {
		amd_iommu_report_page_fault(devid, domid, address, flags);
		return;
	} else {
		printk(KERN_ERR "AMD-Vi: Event logged [");
	}

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
641
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
642
		       address, flags);
643
		dump_dte_entry(devid);
644 645 646 647
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
648
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
649 650 651 652 653
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
654
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
655 656 657 658
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
659
		dump_command(address);
660 661 662 663 664 665 666 667
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
668
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
669 670 671 672 673
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
674
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
675 676 677 678 679
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
680 681

	memset(__evt, 0, 4 * sizeof(u32));
682 683 684 685 686 687 688 689 690 691
}

static void iommu_poll_events(struct amd_iommu *iommu)
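/* Process all pending entries in the event log */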
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
	struct amd_iommu_fault fault;

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address   = raw[1];
	fault.pasid     = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag       = PPR_TAG(raw[0]);
	fault.flags     = PPR_FLAGS(raw[0]);

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}
		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];
		/*
		 * To detect the hardware bug we need to clear the entry
		 * back to zero.
		 */
		raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, entry);

		/* Refresh ring-buffer information */
		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}
}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail, cnt = 0;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);
		cnt++;

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("AMD-Vi: GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}
#endif /* CONFIG_IRQ_REMAP */

#define AMD_IOMMU_INT_MASK	\
	(MMIO_STATUS_EVT_INT_MASK | \
	 MMIO_STATUS_PPR_INT_MASK | \
	 MMIO_STATUS_GALOG_INT_MASK)

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	while (status & AMD_IOMMU_INT_MASK) {
		/* Enable EVT and PPR and GA interrupts again */
		writel(AMD_IOMMU_INT_MASK,
			iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & MMIO_STATUS_EVT_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
			iommu_poll_events(iommu);
		}
		if (status & MMIO_STATUS_PPR_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
			iommu_poll_ppr_log(iommu);
		}
#ifdef CONFIG_IRQ_REMAP
		if (status & MMIO_STATUS_GALOG_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
			iommu_poll_ga_log(iommu);
		}
#endif

855 856 857 858 859 860 861 862 863 864 865 866 867 868 869
		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling interrupt (by writing 1
		 * to clear the bit), the hardware might also try to set
		 * the interrupt bit in the event status register.
		 * In this scenario, the bit will be set, and disable
		 * subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, driver will need to go through the interrupt handler
		 * again and re-clear the bits
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

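/* Busy-wait until the IOMMU writes the completion-wait semaphore */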
static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

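/* Copy a command into the command buffer and update the hardware tail pointer */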
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{
	u8 *target;

	target = iommu->cmd_buf + iommu->cmd_buf_tail;

	iommu->cmd_buf_tail += sizeof(*cmd);
	iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(__pa(address));
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	bool s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = false;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = true;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	bool s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = false;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = true;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
	cmd->data[0] |= (qdep  & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= (pasid & 0xff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1]  = pasid;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command.
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{
	unsigned int count = 0;
	u32 left, next_tail;

	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
again:
	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20) {
		/* Skip udelay() the first time around */
		if (count++) {
			if (count == LOOP_TIMEOUT) {
				pr_err("AMD-Vi: Command buffer timeout\n");
				return -EIO;
			}
			udelay(1);
		}
		/* Update head and recheck remaining space */
		iommu->cmd_buf_head = readl(iommu->mmio_base +
					    MMIO_CMD_HEAD_OFFSET);

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd);
	/* Do we need to make sure all commands are processed? */
	iommu->need_sync = sync;
	return 0;
}

static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, sync);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;

	if (!iommu->need_sync)
		return 0;


	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu->cmd_sem = 0;

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

	ret = wait_on_sem(&iommu->cmd_sem);

out_unlock:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;
	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_irt_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep     = dev_data->ats.qdep;
	iommu    = amd_iommu_rlookup_table[dev_data->devid];
	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	u16 alias;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	alias = dev_data->alias;

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (!ret && alias != dev_data->devid)
		ret = iommu_flush_dte(iommu, alias);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;
	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (domain && !domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;
	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level   = domain->mode - 1;
	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;

		__pte = *pte;

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte) {
				free_page((unsigned long)page);
				continue;
			}
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	if (address > PM_LEVEL_SIZE(domain->mode))
		return NULL;

	level	   =  domain->mode - 1;
	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 7 ||
		    PM_PTE_LEVEL(*pte) == 0)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	if (PM_PTE_LEVEL(*pte) == 0x07) {
		unsigned long pte_mask;

		/*
		 * If we have a series of large PTEs, make
		 * sure to return a pointer to the first one.
		 */
		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
1498 1499 1500 1501 1502
	}

	return pte;
}

/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  unsigned long page_size,
			  int prot,
			  gfp_t gfp)
{
	u64 __pte, *pte;
	int i, count;

	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
	BUG_ON(!IS_ALIGNED(phys_addr, page_size));

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	count = PAGE_SIZE_PTE_COUNT(page_size);
	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
	if (!pte)
		return -ENOMEM;

	for (i = 0; i < count; ++i)
		if (IOMMU_PTE_PRESENT(pte[i]))
			return -EBUSY;
	if (count > 1) {
		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
	} else
		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;
	update_domain(dom);

	return 0;
}

static unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size)
{
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;
	while (unmapped < page_size) {

		pte = fetch_pte(dom, bus_addr, &unmap_size);

		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(unmapped && !is_power_of_2(unmapped));
	return unmapped;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions.
 *
 ****************************************************************************/

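/*
 * Allocate an IOVA range of @pages pages for @dev, preferring addresses
 * below 4GB when the dma_mask allows more.
 */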
static unsigned long dma_ops_alloc_iova(struct device *dev,
					struct dma_ops_domain *dma_dom,
					unsigned int pages, u64 dma_mask)
{
	unsigned long pfn = 0;
	pages = __roundup_pow_of_two(pages);
	if (dma_mask > DMA_BIT_MASK(32))
		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
				      IOVA_PFN(DMA_BIT_MASK(32)));
	if (!pfn)
		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
	return (pfn << PAGE_SHIFT);
}

static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
			      unsigned long address,
			      unsigned int pages)
{
	pages = __roundup_pow_of_two(pages);
	address >>= PAGE_SHIFT;
	free_iova_fast(&dma_dom->iovad, address, pages);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

/*
 * This function adds a protection domain to the global protection domain list
 */
static void add_domain_to_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_add(&domain->list, &amd_iommu_pd_list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function removes a protection domain from the global
 * protection domain list
 */
static void del_domain_from_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_del(&domain->list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

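/* Allocate an unused protection domain id from the global bitmap */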
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void domain_id_free(int id)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

#define DEFINE_FREE_PT_FN(LVL, FN)				\
static void free_pt_##LVL (unsigned long __pt)			\
{								\
	unsigned long p;					\
	u64 *pt;						\
	int i;							\
								\
	pt = (u64 *)__pt;					\
								\
	for (i = 0; i < 512; ++i) {				\
		/* PTE present? */				\
		if (!IOMMU_PTE_PRESENT(pt[i]))			\
			continue;				\
								\
		/* Large PTE? */				\
		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
		    PM_PTE_LEVEL(pt[i]) == 7)			\
			continue;				\
								\
		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
		FN(p);						\
	}							\
	free_page((unsigned long)pt);				\
}

DEFINE_FREE_PT_FN(l2, free_page)
DEFINE_FREE_PT_FN(l3, free_pt_l2)
DEFINE_FREE_PT_FN(l4, free_pt_l3)
DEFINE_FREE_PT_FN(l5, free_pt_l4)
DEFINE_FREE_PT_FN(l6, free_pt_l5)

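/* Free the whole IO page table of a protection domain */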
static void free_pagetable(struct protection_domain *domain)
{
	unsigned long root = (unsigned long)domain->pt_root;
	switch (domain->mode) {
	case PAGE_MODE_NONE:
		break;
	case PAGE_MODE_1_LEVEL:
		free_page(root);
		break;
	case PAGE_MODE_2_LEVEL:
		free_pt_l2(root);
		break;
	case PAGE_MODE_3_LEVEL:
		free_pt_l3(root);
		break;
	case PAGE_MODE_4_LEVEL:
		free_pt_l4(root);
		break;
	case PAGE_MODE_5_LEVEL:
		free_pt_l5(root);
		break;
	case PAGE_MODE_6_LEVEL:
		free_pt_l6(root);
		break;
	default:
		BUG();
	}
}

static void free_gcr3_tbl_level1(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = __va(tbl[i] & PAGE_MASK);

		free_page((unsigned long)ptr);
	}
}

static void free_gcr3_tbl_level2(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = __va(tbl[i] & PAGE_MASK);

		free_gcr3_tbl_level1(ptr);
	}
}

static void free_gcr3_table(struct protection_domain *domain)
{
	if (domain->glx == 2)
		free_gcr3_tbl_level2(domain->gcr3_tbl);
	else if (domain->glx == 1)
		free_gcr3_tbl_level1(domain->gcr3_tbl);
	else
		BUG_ON(domain->glx != 0);
	free_page((unsigned long)domain->gcr3_tbl);
}

static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;

		queue = per_cpu_ptr(dom->flush_queue, cpu);
		kfree(queue->entries);
	}

	free_percpu(dom->flush_queue);

	dom->flush_queue = NULL;
}

static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
{
	int cpu;

	atomic64_set(&dom->flush_start_cnt,  0);
	atomic64_set(&dom->flush_finish_cnt, 0);

	dom->flush_queue = alloc_percpu(struct flush_queue);
	if (!dom->flush_queue)
		return -ENOMEM;

	/* First make sure everything is cleared */
	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;

		queue = per_cpu_ptr(dom->flush_queue, cpu);
		queue->head    = 0;
		queue->tail    = 0;
		queue->entries = NULL;
	}

	/* Now start doing the allocation */
	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;

		queue = per_cpu_ptr(dom->flush_queue, cpu);
		queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries),
					 GFP_KERNEL);
		if (!queue->entries) {
			dma_ops_domain_free_flush_queue(dom);
			return -ENOMEM;
		}

		spin_lock_init(&queue->lock);
	}

	return 0;
}

static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
{
	atomic64_inc(&dom->flush_start_cnt);
	domain_flush_tlb(&dom->domain);
	domain_flush_complete(&dom->domain);
	atomic64_inc(&dom->flush_finish_cnt);
}

static inline bool queue_ring_full(struct flush_queue *queue)
{
	assert_spin_locked(&queue->lock);

	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
}

#define queue_ring_for_each(i, q) \
	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)

static inline unsigned queue_ring_add(struct flush_queue *queue)
{
	unsigned idx = queue->tail;

	assert_spin_locked(&queue->lock);
	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;

	return idx;
}

static inline void queue_ring_remove_head(struct flush_queue *queue)
{
	assert_spin_locked(&queue->lock);
	queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE;
}

static void queue_ring_free_flushed(struct dma_ops_domain *dom,
				    struct flush_queue *queue)
{
	u64 counter = atomic64_read(&dom->flush_finish_cnt);
	int idx;

	queue_ring_for_each(idx, queue) {
		/*
		 * This assumes that counter values in the ring-buffer are
		 * monotonically increasing.
		 */
		if (queue->entries[idx].counter >= counter)
			break;

		free_iova_fast(&dom->iovad,
			       queue->entries[idx].iova_pfn,
			       queue->entries[idx].pages);

		queue_ring_remove_head(queue);
	}
}

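/*
 * Queue an IOVA range for deferred freeing; the range is only given back
 * to the allocator after the domain's IOTLB has been flushed.
 */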
static void queue_add(struct dma_ops_domain *dom,
		      unsigned long address, unsigned long pages)
{
	struct flush_queue *queue;
	unsigned long flags;
	int idx;

	pages     = __roundup_pow_of_two(pages);
	address >>= PAGE_SHIFT;

	queue = get_cpu_ptr(dom->flush_queue);
	spin_lock_irqsave(&queue->lock, flags);

	/*
	 * When ring-queue is full, flush the entries from the IOTLB so
	 * that we can free all entries with queue_ring_free_flushed()
	 * below.
	 */
	if (queue_ring_full(queue))
		dma_ops_domain_flush_tlb(dom);

	queue_ring_free_flushed(dom, queue);

	idx = queue_ring_add(queue);

	queue->entries[idx].iova_pfn = address;
	queue->entries[idx].pages    = pages;
	queue->entries[idx].counter  = atomic64_read(&dom->flush_start_cnt);
	spin_unlock_irqrestore(&queue->lock, flags);

	if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)
		mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));

	put_cpu_ptr(dom->flush_queue);
}

static void queue_flush_timeout(unsigned long data)
{
	struct dma_ops_domain *dom = (struct dma_ops_domain *)data;
	int cpu;

	atomic_set(&dom->flush_timer_on, 0);

	dma_ops_domain_flush_tlb(dom);

	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;
		unsigned long flags;

		queue = per_cpu_ptr(dom->flush_queue, cpu);
		spin_lock_irqsave(&queue->lock, flags);
		queue_ring_free_flushed(dom, queue);
		spin_unlock_irqrestore(&queue->lock, flags);
	}
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	del_domain_from_list(&dom->domain);

	if (timer_pending(&dom->flush_timer))
		del_timer(&dom->flush_timer);

	dma_ops_domain_free_flush_queue(dom);

	put_iova_domain(&dom->iovad);
	free_pagetable(&dom->domain);
	if (dom->domain.id)
		domain_id_free(dom->domain.id);

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	if (protection_domain_init(&dma_dom->domain))
1997
		goto free_dma_dom;
1998

1999
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
2000
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2001
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
2002 2003 2004
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
			 IOVA_START_PFN, DMA_32BIT_PFN);

	/* Initialize reserved ranges */
	copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);

	if (dma_ops_domain_alloc_flush_queue(dma_dom))
		goto free_dma_dom;

	setup_timer(&dma_dom->flush_timer, queue_flush_timeout,
		    (unsigned long)dma_dom);

	atomic_set(&dma_dom->flush_timer_on, 0);

	add_domain_to_list(&dma_dom->domain);

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
	return domain->flags & PD_DMA_OPS_MASK;
}

static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
	u64 pte_root = 0;
	u64 flags = 0;

	if (domain->mode != PAGE_MODE_NONE)
		pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	flags = amd_iommu_dev_table[devid].data[1];

	if (ats)
		flags |= DTE_FLAG_IOTLB;

	if (domain->flags & PD_IOMMUV2_MASK) {
		u64 gcr3 = __pa(domain->gcr3_tbl);
		u64 glx  = domain->glx;
		u64 tmp;

		pte_root |= DTE_FLAG_GV;
		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;

		/* First mask out possible old values for GCR3 table */
		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
		flags    &= ~tmp;

		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
		flags    &= ~tmp;

		/* Encode GCR3 table into DTE */
		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
		pte_root |= tmp;

		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
		flags    |= tmp;

		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
		flags    |= tmp;
	}

	flags &= ~(0xffffUL);
	flags |= domain->id;

	amd_iommu_dev_table[devid].data[1]  = flags;
	amd_iommu_dev_table[devid].data[0]  = pte_root;
}

static void clear_dte_entry(u16 devid)
{
	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;

	amd_iommu_apply_erratum_63(devid);
}

static void do_attach(struct iommu_dev_data *dev_data,
		      struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	u16 alias;
	bool ats;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	alias = dev_data->alias;
	ats   = dev_data->ats.enabled;

	/* Update data structures */
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt                 += 1;

	/* Update device table */
	set_dte_entry(dev_data->devid, domain, ats);
	if (alias != dev_data->devid)
		set_dte_entry(alias, domain, ats);

	device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	u16 alias;

	/*
	 * First check if the device is still attached. It might already
	 * be detached from its domain because the generic
	 * iommu_detach_group code detached it and we try again here in
	 * our alias handling.
	 */
	if (!dev_data->domain)
		return;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	alias = dev_data->alias;

	/* decrease reference counters */
	dev_data->domain->dev_iommu[iommu->index] -= 1;
	dev_data->domain->dev_cnt                 -= 1;

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(dev_data->devid);
	if (alias != dev_data->devid)
		clear_dte_entry(alias);

	/* Flush the DTE entry */
	device_flush_dte(dev_data);
}

/*
 * If a device is not yet associated with a domain, this function
 * associates it with the domain and makes it visible to the hardware.
 */
static int __attach_device(struct iommu_dev_data *dev_data,
			   struct protection_domain *domain)
{
	int ret;

	/*
	 * Must be called with IRQs disabled. Warn here to detect early
	 * when it's not.
	 */
	WARN_ON(!irqs_disabled());

	/* lock domain */
	spin_lock(&domain->lock);

	ret = -EBUSY;
	if (dev_data->domain != NULL)
		goto out_unlock;

	/* Attach alias group root */
	do_attach(dev_data, domain);

	ret = 0;

out_unlock:

	/* ready */
	spin_unlock(&domain->lock);

	return ret;
}

static void pdev_iommuv2_disable(struct pci_dev *pdev)
{
	pci_disable_ats(pdev);
	pci_disable_pri(pdev);
	pci_disable_pasid(pdev);
}

/* FIXME: Change generic reset-function to do the same */
static int pri_reset_while_enabled(struct pci_dev *pdev)
{
	u16 control;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
	control |= PCI_PRI_CTRL_RESET;
	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);

	return 0;
}

static int pdev_iommuv2_enable(struct pci_dev *pdev)
{
	bool reset_enable;
	int reqs, ret;

	/* FIXME: Hardcode number of outstanding requests for now */
	reqs = 32;
	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
		reqs = 1;
	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);

	/* Only allow access to user-accessible pages */
	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		goto out_err;

	/* First reset the PRI state of the device */
	ret = pci_reset_pri(pdev);
	if (ret)
		goto out_err;

	/* Enable PRI */
	ret = pci_enable_pri(pdev, reqs);
	if (ret)
		goto out_err;

	if (reset_enable) {
		ret = pri_reset_while_enabled(pdev);
		if (ret)
			goto out_err;
	}

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		goto out_err;

	return 0;

out_err:
	pci_disable_pri(pdev);
	pci_disable_pasid(pdev);

	return ret;
}

/* FIXME: Move this to PCI code */
#define PCI_PRI_TLP_OFF		(1 << 15)

static bool pci_pri_tlp_required(struct pci_dev *pdev)
{
	u16 status;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (!pos)
		return false;

	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);

	return (status & PCI_PRI_TLP_OFF) ? true : false;
}

/*
 * If a device is not yet associated with a domain, this function
 * associates it with the domain and makes it visible to the hardware.
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
2284
{
2285
	struct pci_dev *pdev;
2286
	struct iommu_dev_data *dev_data;
2287
	unsigned long flags;
2288
	int ret;
2289

2290 2291
	dev_data = get_dev_data(dev);

	if (!dev_is_pci(dev))
		goto skip_ats_check;

	pdev = to_pci_dev(dev);
	if (domain->flags & PD_IOMMUV2_MASK) {
		if (!dev_data->passthrough)
			return -EINVAL;

		if (dev_data->iommu_v2) {
			if (pdev_iommuv2_enable(pdev) != 0)
				return -EINVAL;

			dev_data->ats.enabled = true;
			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
		}
	} else if (amd_iommu_iotlb_sup &&
		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
		dev_data->ats.enabled = true;
		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
	}

skip_ats_check:
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	ret = __attach_device(dev_data, domain);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	domain_flush_tlb_pde(domain);

	return ret;
}

/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct iommu_dev_data *dev_data)
{
	struct protection_domain *domain;

	/*
	 * Must be called with IRQs disabled. Warn here to detect early
	 * when it's not.
	 */
	WARN_ON(!irqs_disabled());

	if (WARN_ON(!dev_data->domain))
		return;

	domain = dev_data->domain;

	spin_lock(&domain->lock);

	do_detach(dev_data);

	spin_unlock(&domain->lock);
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct device *dev)
{
	struct protection_domain *domain;
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = get_dev_data(dev);
	domain   = dev_data->domain;

	/* lock device table */
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__detach_device(dev_data);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	if (!dev_is_pci(dev))
		return;

	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
		pdev_iommuv2_disable(to_pci_dev(dev));
	else if (dev_data->ats.enabled)
		pci_disable_ats(to_pci_dev(dev));

	dev_data->ats.enabled = false;
}

static int amd_iommu_add_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct iommu_domain *domain;
	struct amd_iommu *iommu;
	int ret, devid;

	if (!check_device(dev) || get_dev_data(dev))
		return 0;

	devid = get_device_id(dev);
	if (devid < 0)
		return devid;

	iommu = amd_iommu_rlookup_table[devid];

	ret = iommu_init_device(dev);
	if (ret) {
		if (ret != -ENOTSUPP)
			pr_err("Failed to initialize device %s - trying to proceed anyway\n",
				dev_name(dev));

		iommu_ignore_device(dev);
		dev->dma_ops = &nommu_dma_ops;
		goto out;
	}
	init_iommu_group(dev);

	dev_data = get_dev_data(dev);

	BUG_ON(!dev_data);

	if (iommu_pass_through || dev_data->iommu_v2)
		iommu_request_dm_for_dev(dev);

	/* Domains are initialized for this device - have a look what we ended up with */
	domain = iommu_get_domain_for_dev(dev);
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		dev_data->passthrough = true;
	else
		dev->dma_ops = &amd_iommu_dma_ops;

out:
	iommu_completion_wait(iommu);

	return 0;
}

static void amd_iommu_remove_device(struct device *dev)
{
	struct amd_iommu *iommu;
	int devid;

	if (!check_device(dev))
		return;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	iommu = amd_iommu_rlookup_table[devid];

	iommu_uninit_device(dev);
	iommu_completion_wait(iommu);
}

static struct iommu_group *amd_iommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return acpihid_device_group(dev);
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static struct protection_domain *get_domain(struct device *dev)
{
	struct protection_domain *domain;

	if (!check_device(dev))
		return ERR_PTR(-EINVAL);

	domain = get_dev_data(dev)->domain;
	if (!dma_ops_domain(domain))
		return ERR_PTR(-EBUSY);

	return domain;
}

static void update_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);

		if (dev_data->devid == dev_data->alias)
			continue;

		/* There is an alias, update device table entry for it */
		set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
	}
}

static void update_domain(struct protection_domain *domain)
{
	if (!domain->updated)
		return;

	update_device_table(domain);

	domain_flush_devices(domain);
	domain_flush_tlb_pde(domain);

	domain->updated = false;
}

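/* Translate a DMA transfer direction into IOMMU page-table protection bits */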
static int dir2prot(enum dma_data_direction direction)
{
	if (direction == DMA_TO_DEVICE)
		return IOMMU_PROT_IR;
	else if (direction == DMA_FROM_DEVICE)
		return IOMMU_PROT_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		return IOMMU_PROT_IW | IOMMU_PROT_IR;
	else
		return 0;
}
/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       enum dma_data_direction direction,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	int prot = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
	if (address == DMA_ERROR_CODE)
		goto out;

	prot = dir2prot(direction);

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = iommu_map_page(&dma_dom->domain, start, paddr,
				     PAGE_SIZE, prot, GFP_ATOMIC);
		if (ret)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	if (unlikely(amd_iommu_np_cache)) {
		domain_flush_pages(&dma_dom->domain, address, size);
		domain_flush_complete(&dma_dom->domain);
	}

out:
	return address;

out_unmap:

	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
	}

	domain_flush_tlb(&dma_dom->domain);
	domain_flush_complete(&dma_dom->domain);

	dma_ops_free_iova(dma_dom, address, pages);

	return DMA_ERROR_CODE;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t flush_addr;
	dma_addr_t i, start;
	unsigned int pages;

	flush_addr = dma_addr;
	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
		start += PAGE_SIZE;
	}

	if (amd_iommu_unmap_flush) {
		dma_ops_free_iova(dma_dom, dma_addr, pages);
		domain_flush_tlb(&dma_dom->domain);
		domain_flush_complete(&dma_dom->domain);
	} else {
		queue_add(dma_dom, dma_addr, pages);
	}
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	u64 dma_mask;

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		return DMA_ERROR_CODE;

	dma_mask = *dev->dma_mask;
	dma_dom = to_dma_ops_domain(domain);

	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	dma_dom = to_dma_ops_domain(domain);

	__unmap_single(dma_dom, dma_addr, size, dir);
}

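/*
 * Compute the number of IOVA pages needed to map a scatterlist, honouring
 * the device's segment boundary mask. The relative page offset of each
 * entry is stored in s->dma_address and fixed up after the actual mapping.
 */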
static int sg_num_pages(struct device *dev,
			struct scatterlist *sglist,
			int nelems)
{
	unsigned long mask, boundary_size;
	struct scatterlist *s;
	int i, npages = 0;

	mask          = dma_get_seg_boundary(dev);
	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
				   1UL << (BITS_PER_LONG - PAGE_SHIFT);

	for_each_sg(sglist, s, nelems, i) {
		int p, n;

		s->dma_address = npages << PAGE_SHIFT;
		p = npages % boundary_size;
		n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
		if (p + n > boundary_size)
			npages += boundary_size - p;
		npages += n;
	}

	return npages;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction direction,
		  unsigned long attrs)
{
	int mapped_pages = 0, npages = 0, prot = 0, i;
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	struct scatterlist *s;
	unsigned long address;
	u64 dma_mask;

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return 0;

	dma_dom  = to_dma_ops_domain(domain);
	dma_mask = *dev->dma_mask;

	npages = sg_num_pages(dev, sglist, nelems);

	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
	if (address == DMA_ERROR_CODE)
		goto out_err;

	prot = dir2prot(direction);

	/* Map all sg entries */
	for_each_sg(sglist, s, nelems, i) {
		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);

		for (j = 0; j < pages; ++j) {
			unsigned long bus_addr, phys_addr;
			int ret;

			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
			phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
			ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
			if (ret)
				goto out_unmap;

			mapped_pages += 1;
		}
	}

	/* Everything is mapped - write the right values into s->dma_address */
	for_each_sg(sglist, s, nelems, i) {
		s->dma_address += address + s->offset;
		s->dma_length   = s->length;
	}

	return nelems;

out_unmap:
	pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
	       dev_name(dev), npages);

	for_each_sg(sglist, s, nelems, i) {
		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);

		for (j = 0; j < pages; ++j) {
			unsigned long bus_addr;

			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);

			/* Stop only once every successfully mapped page is unmapped */
			if (--mapped_pages == 0)
				goto out_free_iova;
		}
	}

out_free_iova:
	free_iova_fast(&dma_dom->iovad, address, npages);

out_err:
	return 0;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     unsigned long attrs)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	unsigned long startaddr;
	int npages = 2;

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	startaddr = sg_dma_address(sglist) & PAGE_MASK;
	dma_dom   = to_dma_ops_domain(domain);
	npages    = sg_num_pages(dev, sglist, nelems);

	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag,
			    unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	struct page *page;

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL) {
		page = alloc_pages(flag, get_order(size));
		*dma_addr = page_to_phys(page);
		return page_address(page);
	} else if (IS_ERR(domain))
		return NULL;

	dma_dom   = to_dma_ops_domain(domain);
	size	  = PAGE_ALIGN(size);
	dma_mask  = dev->coherent_dma_mask;
	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	flag     |= __GFP_ZERO;

	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
	if (!page) {
		if (!gfpflags_allow_blocking(flag))
			return NULL;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flag);
		if (!page)
			return NULL;
	}

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
				 size, DMA_BIDIRECTIONAL, dma_mask);

	if (*dma_addr == DMA_ERROR_CODE)
		goto out_free;

	return page_address(page);

out_free:

	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));

	return NULL;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	struct page *page;

	page = virt_to_page(virt_addr);
	size = PAGE_ALIGN(size);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		goto free_mem;

	dma_dom = to_dma_ops_domain(domain);

	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);

free_mem:
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	return check_device(dev);
}

static const struct dma_map_ops amd_iommu_dma_ops = {
	.alloc		= alloc_coherent,
	.free		= free_coherent,
	.map_page	= map_page,
	.unmap_page	= unmap_page,
	.map_sg		= map_sg,
	.unmap_sg	= unmap_sg,
	.dma_supported	= amd_iommu_dma_supported,
};

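/*
 * Reserve address ranges that must never be handed out as IOVA space:
 * the MSI and HyperTransport windows and all PCI MMIO resources.
 */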
static int init_reserved_iova_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *val;

	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
			 IOVA_START_PFN, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* MSI memory range */
	val = reserve_iova(&reserved_iova_ranges,
			   IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
	if (!val) {
		pr_err("Reserving MSI range failed\n");
		return -ENOMEM;
	}

	/* HT memory range */
	val = reserve_iova(&reserved_iova_ranges,
			   IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
	if (!val) {
		pr_err("Reserving HT range failed\n");
		return -ENOMEM;
	}

	/*
	 * Memory used for PCI resources
	 * FIXME: Check whether we can reserve the PCI-hole completely
	 */
	for_each_pci_dev(pdev) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
			struct resource *r = &pdev->resource[i];

			if (!(r->flags & IORESOURCE_MEM))
				continue;

			val = reserve_iova(&reserved_iova_ranges,
					   IOVA_PFN(r->start),
					   IOVA_PFN(r->end));
			if (!val) {
				pr_err("Reserve pci-resource range failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}

int __init amd_iommu_init_api(void)
{
	int ret, err = 0;

	ret = iova_cache_get();
	if (ret)
		return ret;

2954 2955 2956 2957
	ret = init_reserved_iova_ranges();
	if (ret)
		return ret;

	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
	if (err)
		return err;
#ifdef CONFIG_ARM_AMBA
	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
	if (err)
		return err;
#endif
	err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
	if (err)
		return err;

	return 0;
}

int __init amd_iommu_init_dma_ops(void)
{
	swiotlb        = iommu_pass_through ? 1 : 0;
	iommu_detected = 1;

	/*
	 * In case we don't initialize SWIOTLB (actually the common case
	 * when AMD IOMMU is enabled), make sure there are global
	 * dma_ops set as a fall-back for devices not handled by this
	 * driver (for example non-PCI devices).
	 */
	if (!swiotlb)
		dma_ops = &nommu_dma_ops;

	if (amd_iommu_unmap_flush)
		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");

	return 0;

}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{
	struct iommu_dev_data *entry;
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	while (!list_empty(&domain->dev_list)) {
		entry = list_first_entry(&domain->dev_list,
					 struct iommu_dev_data, list);
		__detach_device(entry);
	}

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	del_domain_from_list(domain);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}

static int protection_domain_init(struct protection_domain *domain)
{
	spin_lock_init(&domain->lock);
	mutex_init(&domain->api_lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		return -ENOMEM;
	INIT_LIST_HEAD(&domain->dev_list);

	return 0;
}

static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (protection_domain_init(domain))
		goto out_err;

	add_domain_to_list(domain);

	return domain;

out_err:
	kfree(domain);

	return NULL;
}

static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
	struct protection_domain *pdomain;
	struct dma_ops_domain *dma_domain;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		pdomain = protection_domain_alloc();
		if (!pdomain)
			return NULL;

		pdomain->mode    = PAGE_MODE_3_LEVEL;
		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pdomain->pt_root) {
			protection_domain_free(pdomain);
			return NULL;
		}

		pdomain->domain.geometry.aperture_start = 0;
		pdomain->domain.geometry.aperture_end   = ~0ULL;
		pdomain->domain.geometry.force_aperture = true;

		break;
	case IOMMU_DOMAIN_DMA:
		dma_domain = dma_ops_domain_alloc();
		if (!dma_domain) {
			pr_err("AMD-Vi: Failed to allocate\n");
			return NULL;
		}
		pdomain = &dma_domain->domain;
		break;
	case IOMMU_DOMAIN_IDENTITY:
		pdomain = protection_domain_alloc();
		if (!pdomain)
			return NULL;

		pdomain->mode = PAGE_MODE_NONE;
		break;
	default:
		return NULL;
	}

	return &pdomain->domain;
}

static void amd_iommu_domain_free(struct iommu_domain *dom)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;

	domain = to_pdomain(dom);

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	if (!dom)
		return;

	switch (dom->type) {
	case IOMMU_DOMAIN_DMA:
		/* Now release the domain */
		dma_dom = to_dma_ops_domain(domain);
		dma_ops_domain_free(dma_dom);
		break;
	default:
		if (domain->mode != PAGE_MODE_NONE)
			free_pagetable(domain);

		if (domain->flags & PD_IOMMUV2_MASK)
			free_gcr3_table(domain);

		protection_domain_free(domain);
		break;
	}
}

static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct iommu_dev_data *dev_data = dev->archdata.iommu;
	struct amd_iommu *iommu;
	int devid;

	if (!check_device(dev))
		return;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	if (dev_data->domain != NULL)
		detach_device(dev);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
		dev_data->use_vapic = 0;
#endif

	iommu_completion_wait(iommu);
}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int ret;

	if (!check_device(dev))
		return -EINVAL;

	dev_data = dev->archdata.iommu;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	if (!iommu)
		return -EINVAL;

	if (dev_data->domain)
		detach_device(dev);

	ret = attach_device(dev, domain);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
			dev_data->use_vapic = 1;
		else
			dev_data->use_vapic = 0;
	}
#endif

	iommu_completion_wait(iommu);

	return ret;
}

static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
			 phys_addr_t paddr, size_t page_size, int iommu_prot)
{
	struct protection_domain *domain = to_pdomain(dom);
	int prot = 0;
	int ret;

	if (domain->mode == PAGE_MODE_NONE)
		return -EINVAL;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	mutex_lock(&domain->api_lock);
	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
	mutex_unlock(&domain->api_lock);

	return ret;
}

static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
			   size_t page_size)
{
	struct protection_domain *domain = to_pdomain(dom);
	size_t unmap_size;

	if (domain->mode == PAGE_MODE_NONE)
		return -EINVAL;

	mutex_lock(&domain->api_lock);
	unmap_size = iommu_unmap_page(domain, iova, page_size);
	mutex_unlock(&domain->api_lock);

	domain_flush_tlb_pde(domain);

	return unmap_size;
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  dma_addr_t iova)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	if (domain->mode == PAGE_MODE_NONE)
		return iova;

	pte = fetch_pte(domain, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = *pte & PM_ADDR_MASK;

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

static bool amd_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return (irq_remapping_enabled == 1);
	case IOMMU_CAP_NOEXEC:
		return false;
	}

	return false;
}

static void amd_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;
	struct unity_map_entry *entry;
	int devid;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		size_t length;
		int prot = 0;

		if (devid < entry->devid_start || devid > entry->devid_end)
			continue;

		length = entry->address_end - entry->address_start;
		if (entry->prot & IOMMU_PROT_IR)
			prot |= IOMMU_READ;
		if (entry->prot & IOMMU_PROT_IW)
			prot |= IOMMU_WRITE;

		region = iommu_alloc_resv_region(entry->address_start,
						 length, prot,
						 IOMMU_RESV_DIRECT);
		if (!region) {
			pr_err("Out of memory allocating dm-regions for %s\n",
				dev_name(dev));
			return;
		}
		list_add_tail(&region->list, head);
	}

	region = iommu_alloc_resv_region(MSI_RANGE_START,
					 MSI_RANGE_END - MSI_RANGE_START + 1,
					 0, IOMMU_RESV_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);

	region = iommu_alloc_resv_region(HT_RANGE_START,
					 HT_RANGE_END - HT_RANGE_START + 1,
					 0, IOMMU_RESV_RESERVED);
	if (!region)
		return;
	list_add_tail(&region->list, head);
}

static void amd_iommu_put_resv_regions(struct device *dev,
				     struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static void amd_iommu_apply_resv_region(struct device *dev,
				      struct iommu_domain *domain,
				      struct iommu_resv_region *region)
{
	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
	unsigned long start, end;

	start = IOVA_PFN(region->start);
	end   = IOVA_PFN(region->start + region->length);

	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}

const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
	.domain_free  = amd_iommu_domain_free,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map,
	.unmap = amd_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.add_device = amd_iommu_add_device,
	.remove_device = amd_iommu_remove_device,
	.device_group = amd_iommu_device_group,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.put_resv_regions = amd_iommu_put_resv_regions,
	.apply_resv_region = amd_iommu_apply_resv_region,
	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
};

/*****************************************************************************
 *
 * The next functions do a basic initialization of IOMMU for pass through
 * mode
 *
 * In passthrough mode the IOMMU is initialized and enabled but not used for
 * DMA-API translation.
 *
 *****************************************************************************/

/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);

int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);

void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	/* Update data structure */
	domain->mode    = PAGE_MODE_NONE;
	domain->updated = true;

	/* Make changes visible to IOMMUs */
	update_domain(domain);

	/* Page-table is not visible to IOMMU anymore, so free it */
	free_pagetable(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);

int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int levels, ret;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	/* Number of GCR3 table levels required */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	spin_lock_irqsave(&domain->lock, flags);

	/*
	 * Save us all the sanity checks of whether devices already in the
	 * domain support IOMMUv2. Just force that the domain has no
	 * devices attached when it is switched into IOMMUv2 mode.
	 */
	ret = -EBUSY;
	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
		goto out;

	ret = -ENOMEM;
	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
	if (domain->gcr3_tbl == NULL)
		goto out;

	domain->glx      = levels;
	domain->flags   |= PD_IOMMUV2_MASK;
	domain->updated  = true;

	update_domain(domain);

	ret = 0;

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);

static int __flush_pasid(struct protection_domain *domain, int pasid,
			 u64 address, bool size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int i, ret;

	if (!(domain->flags & PD_IOMMUV2_MASK))
		return -EINVAL;

	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);

	/*
	 * IOMMU TLB needs to be flushed before Device TLB to
	 * prevent device TLB refill from IOMMU TLB
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (domain->dev_iommu[i] == 0)
			continue;

		ret = iommu_queue_command(amd_iommus[i], &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until IOMMU TLB flushes are complete */
	domain_flush_complete(domain);

	/* Now flush device TLBs */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu;
		int qdep;

		/*
		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
		 * domain.
		 */
		if (!dev_data->ats.enabled)
			continue;

		qdep  = dev_data->ats.qdep;
		iommu = amd_iommu_rlookup_table[dev_data->devid];

		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
				      qdep, address, size);

		ret = iommu_queue_command(iommu, &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until all device TLBs are flushed */
	domain_flush_complete(domain);

	ret = 0;

out:

	return ret;
}

static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
				  u64 address)
{
	return __flush_pasid(domain, pasid, address, false);
}

int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
			 u64 address)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_page(domain, pasid, address);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_page);

static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
{
	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			     true);
}

int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_tlb(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);

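/*
 * Walk the GCR3 table for @pasid and return a pointer to the level-0 entry,
 * allocating missing table levels along the way when @alloc is true.
 */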
static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
{
	int index;
	u64 *pte;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = __pa(root) | GCR3_VALID;
		}

		root = __va(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}

static int __set_gcr3(struct protection_domain *domain, int pasid,
		      unsigned long cr3)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;

	return __amd_iommu_flush_tlb(domain, pasid);
}

static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
	if (pte == NULL)
		return 0;

	*pte = 0;

	return __amd_iommu_flush_tlb(domain, pasid);
}

int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
			      unsigned long cr3)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);

int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __clear_gcr3(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);

int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
			   int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	dev_data = get_dev_data(&pdev->dev);
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}
EXPORT_SYMBOL(amd_iommu_complete_ppr);

struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
	struct protection_domain *pdomain;

	pdomain = get_domain(&pdev->dev);
	if (IS_ERR(pdomain))
		return NULL;

	/* Only return IOMMUv2 domains */
	if (!(pdomain->flags & PD_IOMMUV2_MASK))
		return NULL;

	return &pdomain->domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);

void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	if (!amd_iommu_v2_supported())
		return;

	dev_data = get_dev_data(&pdev->dev);
	dev_data->errata |= (1 << erratum);
}
EXPORT_SYMBOL(amd_iommu_enable_device_erratum);

int amd_iommu_device_info(struct pci_dev *pdev,
                          struct amd_iommu_device_info *info)
{
	int max_pasids;
	int pos;

	if (pdev == NULL || info == NULL)
		return -EINVAL;

	if (!amd_iommu_v2_supported())
		return -EINVAL;

	memset(info, 0, sizeof(*info));

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
	if (pos) {
		int features;

		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
		max_pasids = min(max_pasids, (1 << 20));

		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);

		features = pci_pasid_features(pdev);
		if (features & PCI_PASID_CAP_EXEC)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
		if (features & PCI_PASID_CAP_PRIV)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);

#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

static struct irq_chip amd_ir_chip;

#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE    1ULL

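/* Point the device table entry at the per-device interrupt remapping table */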
static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
	u64 dte;

	dte	= amd_iommu_dev_table[devid].data[2];
	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
	dte	|= virt_to_phys(table->table);
	dte	|= DTE_IRQ_REMAP_INTCTL;
	dte	|= DTE_IRQ_TABLE_LEN;
	dte	|= DTE_IRQ_REMAP_ENABLE;

	amd_iommu_dev_table[devid].data[2] = dte;
}

static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
	struct irq_remap_table *table = NULL;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 alias;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		goto out_unlock;

	table = irq_lookup_table[devid];
	if (table)
		goto out_unlock;

	alias = amd_iommu_alias_table[devid];
	table = irq_lookup_table[alias];
	if (table) {
		irq_lookup_table[devid] = table;
		set_dte_irq_entry(devid, table);
		iommu_flush_dte(iommu, devid);
		goto out;
	}

	/* Nothing there yet, allocate new irq remapping table */
	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (!table)
		goto out_unlock;

3813 3814 3815
	/* Initialize table spin-lock */
	spin_lock_init(&table->lock);

	if (ioapic)
		/* Keep the first 32 indexes free for IOAPIC interrupts */
		table->min_index = 32;

	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
	if (!table->table) {
		kfree(table);
		table = NULL;
		goto out_unlock;
	}

	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		memset(table->table, 0,
		       MAX_IRQS_PER_TABLE * sizeof(u32));
	else
		memset(table->table, 0,
		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));

	if (ioapic) {
		int i;

		for (i = 0; i < 32; ++i)
			iommu->irte_ops->set_allocated(table, i);
	}

	irq_lookup_table[devid] = table;
	set_dte_irq_entry(devid, table);
	iommu_flush_dte(iommu, devid);
	if (devid != alias) {
		irq_lookup_table[alias] = table;
		set_dte_irq_entry(alias, table);
		iommu_flush_dte(iommu, alias);
	}

out:
	iommu_completion_wait(iommu);

out_unlock:
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return table;
}

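/* Find 'count' consecutive free slots in the device's interrupt remapping table */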
static int alloc_irq_index(u16 devid, int count)
{
	struct irq_remap_table *table;
	unsigned long flags;
	int index, c;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return -ENODEV;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENODEV;

	spin_lock_irqsave(&table->lock, flags);

	/* Scan table for free entries */
	for (c = 0, index = table->min_index;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
		if (!iommu->irte_ops->is_allocated(table, index))
			c += 1;
		else
			c = 0;

		if (c == count)	{
			for (; c != 0; --c)
				iommu->irte_ops->set_allocated(table, index - c + 1);

			index -= count - 1;
			goto out;
		}
	}

	index = -ENOSPC;

out:
	spin_unlock_irqrestore(&table->lock, flags);

	return index;
}

static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
			  struct amd_ir_data *data)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;
	struct irte_ga *entry;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);

	entry = (struct irte_ga *)table->table;
	entry = &entry[index];
	entry->lo.fields_remap.valid = 0;
	entry->hi.val = irte->hi.val;
	entry->lo.val = irte->lo.val;
	entry->lo.fields_remap.valid = 1;
	if (data)
		data->ref = entry;

	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}

static int modify_irte(u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte->val;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}

static void free_irte(u16 devid, int index)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return;

	table = get_irq_table(devid, false);
	if (!table)
		return;

	spin_lock_irqsave(&table->lock, flags);
	iommu->irte_ops->clear_allocated(table, index);
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
}

static void irte_prepare(void *entry,
			 u32 delivery_mode, u32 dest_mode,
			 u8 vector, u32 dest_apicid, int devid)
{
	union irte *irte = (union irte *) entry;

	irte->val                = 0;
	irte->fields.vector      = vector;
	irte->fields.int_type    = delivery_mode;
	irte->fields.destination = dest_apicid;
	irte->fields.dm          = dest_mode;
	irte->fields.valid       = 1;
}

static void irte_ga_prepare(void *entry,
			    u32 delivery_mode, u32 dest_mode,
			    u8 vector, u32 dest_apicid, int devid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;
	struct iommu_dev_data *dev_data = search_dev_data(devid);

	irte->lo.val                      = 0;
	irte->hi.val                      = 0;
	irte->lo.fields_remap.guest_mode  = dev_data ? dev_data->use_vapic : 0;
	irte->lo.fields_remap.int_type    = delivery_mode;
	irte->lo.fields_remap.dm          = dest_mode;
	irte->hi.fields.vector            = vector;
	irte->lo.fields_remap.destination = dest_apicid;
	irte->lo.fields_remap.valid       = 1;
}

static void irte_activate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 1;
	modify_irte(devid, index, irte);
}

static void irte_ga_activate(void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 1;
	modify_irte_ga(devid, index, irte, NULL);
}

static void irte_deactivate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 0;
	modify_irte(devid, index, irte);
}

static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 0;
	modify_irte_ga(devid, index, irte, NULL);
}

static void irte_set_affinity(void *entry, u16 devid, u16 index,
			      u8 vector, u32 dest_apicid)
{
	union irte *irte = (union irte *) entry;

	irte->fields.vector = vector;
	irte->fields.destination = dest_apicid;
	modify_irte(devid, index, irte);
}

static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
				 u8 vector, u32 dest_apicid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;
	struct iommu_dev_data *dev_data = search_dev_data(devid);

	if (!dev_data || !dev_data->use_vapic) {
		irte->hi.fields.vector = vector;
		irte->lo.fields_remap.destination = dest_apicid;
		irte->lo.fields_remap.guest_mode = 0;
		modify_irte_ga(devid, index, irte, NULL);
	}
}

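/*
 * Legacy IRTEs are marked as allocated with a reserved magic value;
 * 128-bit entries are instead marked by writing the dummy vector 0xff,
 * which is what irte_ga_is_allocated() checks for (vector != 0).
 */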
#define IRTE_ALLOCATED (~1U)
static void irte_set_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = IRTE_ALLOCATED;
}

static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
	irte->hi.fields.vector = 0xff;
}

static bool irte_is_allocated(struct irq_remap_table *table, int index)
{
	union irte *ptr = (union irte *)table->table;
	union irte *irte = &ptr[index];

	return irte->val != 0;
}

static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	return irte->hi.fields.vector != 0;
}

static void irte_clear_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = 0;
}

static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
}

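/* Extract the IOMMU device id from the interrupt allocation info. */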
static int get_devid(struct irq_alloc_info *info)
{
	int devid = -1;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		devid     = get_ioapic_devid(info->ioapic_id);
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		devid     = get_hpet_devid(info->hpet_id);
		break;
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		devid = get_device_id(&info->msi_dev->dev);
		break;
	default:
		BUG_ON(1);
		break;
	}

	return devid;
}

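/*
 * Return the remapping irq domain of the IOMMU translating the
 * requesting device, or NULL if the device is not behind an IOMMU.
 */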
static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct amd_iommu *iommu;
	int devid;

	if (!info)
		return NULL;

	devid = get_devid(info);
	if (devid >= 0) {
		iommu = amd_iommu_rlookup_table[devid];
		if (iommu)
			return iommu->ir_domain;
	}

	return NULL;
}

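/*
 * For MSI/MSI-X allocations, return the MSI irq domain of the IOMMU
 * responsible for the requesting PCI device.
 */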
static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
{
	struct amd_iommu *iommu;
	int devid;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		devid = get_device_id(&info->msi_dev->dev);
		if (devid < 0)
			return NULL;

		iommu = amd_iommu_rlookup_table[devid];
		if (iommu)
			return iommu->msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

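/* Callbacks used by the generic x86 interrupt remapping core. */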
struct irq_remap_ops amd_iommu_irq_ops = {
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
	.get_ir_irq_domain	= get_ir_irq_domain,
	.get_irq_domain		= get_irq_domain,
};

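/*
 * Fill the in-memory IRTE for one interrupt and prepare the matching
 * IO-APIC entry or MSI message so that it points at the remap table
 * slot instead of carrying vector and destination directly.
 */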
static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       struct irq_cfg *irq_cfg,
				       struct irq_alloc_info *info,
				       int devid, int index, int sub_handle)
{
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct msi_msg *msg = &data->msi_entry;
	struct IO_APIC_route_entry *entry;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return;

	data->irq_2_irte.devid = devid;
	data->irq_2_irte.index = index + sub_handle;
	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
				 apic->irq_dest_mode, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Setup IOAPIC entry */
		entry = info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->vector        = index;
		entry->mask          = 0;
		entry->trigger       = info->ioapic_trigger;
		entry->polarity      = info->ioapic_polarity;
		/* Mask level triggered irqs. */
		if (info->ioapic_trigger)
			entry->mask = 1;
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo = MSI_ADDR_BASE_LO;
		msg->data = irte_info->index;
		break;

	default:
		BUG_ON(1);
		break;
	}
}

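/*
 * Accessor tables for the two IRTE formats: irte_32_ops for the legacy
 * 4-byte entries, irte_128_ops for the 16-byte GA-mode entries.
 */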
struct amd_irte_ops irte_32_ops = {
	.prepare = irte_prepare,
	.activate = irte_activate,
	.deactivate = irte_deactivate,
	.set_affinity = irte_set_affinity,
	.set_allocated = irte_set_allocated,
	.is_allocated = irte_is_allocated,
	.clear_allocated = irte_clear_allocated,
};

struct amd_irte_ops irte_128_ops = {
	.prepare = irte_ga_prepare,
	.activate = irte_ga_activate,
	.deactivate = irte_ga_deactivate,
	.set_affinity = irte_ga_set_affinity,
	.set_allocated = irte_ga_set_allocated,
	.is_allocated = irte_ga_is_allocated,
	.clear_allocated = irte_ga_clear_allocated,
};

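/*
 * Allocate and set up remapped interrupts: reserve IRTE slots for the
 * device (for IO-APIC interrupts the pin number selects the slot) and
 * attach one amd_ir_data plus in-memory IRTE to every new irq.
 */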
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
	struct amd_ir_data *data = NULL;
	struct irq_cfg *cfg;
	int i, ret, devid;
	int index = -1;

	if (!info)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	devid = get_devid(info);
	if (devid < 0)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
		if (get_irq_table(devid, true))
			index = info->ioapic_pin;
		else
			ret = -ENOMEM;
	} else {
		index = alloc_irq_index(devid, nr_irqs);
	}
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		ret = index;
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		cfg = irqd_cfg(irq_data);
		if (!irq_data || !cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		ret = -ENOMEM;
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto out_free_data;

		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
		else
			data->entry = kzalloc(sizeof(struct irte_ga),
						     GFP_KERNEL);
		if (!data->entry) {
			kfree(data);
			goto out_free_data;
		}

		irq_data->hwirq = (devid << 16) + i;
		irq_data->chip_data = data;
		irq_data->chip = &amd_ir_chip;
		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}

	return 0;

out_free_data:
	for (i--; i >= 0; i--) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			kfree(irq_data->chip_data);
	}
	for (i = 0; i < nr_irqs; i++)
		free_irte(devid, index + i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

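/* Undo irq_remapping_alloc(): release IRTEs and per-irq data. */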
static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{
	struct irq_2_irte *irte_info;
	struct irq_data *irq_data;
	struct amd_ir_data *data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irte_info = &data->irq_2_irte;
			free_irte(irte_info->devid, irte_info->index);
			kfree(data->entry);
			kfree(data);
		}
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

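/*
 * Activation and deactivation simply toggle the valid bit of the IRTE
 * in the hardware table through the format-specific callbacks.
 */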
static void irq_remapping_activate(struct irq_domain *domain,
				   struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

	if (iommu)
		iommu->irte_ops->activate(data->entry, irte_info->devid,
					  irte_info->index);
}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

	if (iommu)
		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
					    irte_info->index);
}

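/* irq_domain callbacks for the remapping domain of an IOMMU. */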
static const struct irq_domain_ops amd_ir_domain_ops = {
	.alloc = irq_remapping_alloc,
	.free = irq_remapping_free,
	.activate = irq_remapping_activate,
	.deactivate = irq_remapping_deactivate,
};

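/*
 * Switch an IRTE between guest (vAPIC/posted-interrupt) mode and plain
 * remapped mode.  Reached via irq_set_vcpu_affinity(), typically from
 * the SVM code when a device interrupt gets assigned to a vCPU.
 */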
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pi_data *pi_data = vcpu_info;
	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);

	/* Note:
	 * This device has never been set up for guest mode.
	 * We should not modify the IRTE.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return 0;

	pi_data->ir_data = ir_data;

	/* Note:
	 * SVM tries to set up for VAPIC mode, but we are in
	 * legacy mode. So, we force legacy mode instead.
	 */
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
			 __func__);
		pi_data->is_guest_mode = false;
	}

	iommu = amd_iommu_rlookup_table[irte_info->devid];
	if (iommu == NULL)
		return -EINVAL;

	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
	if (pi_data->is_guest_mode) {
		/* Setting */
		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
		irte->hi.fields.vector = vcpu_pi_info->vector;
		irte->lo.fields_vapic.guest_mode = 1;
		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;

		ir_data->cached_ga_tag = pi_data->ga_tag;
	} else {
		/* Un-Setting */
		struct irq_cfg *cfg = irqd_cfg(data);

		irte->hi.val = 0;
		irte->lo.val = 0;
		irte->hi.fields.vector = cfg->vector;
		irte->lo.fields_remap.guest_mode = 0;
		irte->lo.fields_remap.destination = cfg->dest_apicid;
		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
		irte->lo.fields_remap.dm = apic->irq_dest_mode;

		/*
		 * This communicates the ga_tag back to the caller
		 * so that it can do all the necessary clean up.
		 */
		ir_data->cached_ga_tag = 0;
	}

	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}

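/*
 * Change the CPU affinity of a remapped interrupt: let the parent
 * (vector) domain pick the new vector and destination first, then
 * mirror the result into the IRTE.
 */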
static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Atomically update the IRTE with the new destination and
	 * vector, then flush the interrupt entry cache.
	 */
	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
			    irte_info->index, cfg->vector, cfg->dest_apicid);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

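/* The MSI message was already composed in irq_remapping_prepare_irte(). */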
static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct amd_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

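/* irq_chip for interrupts remapped by the AMD IOMMU. */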
static struct irq_chip amd_ir_chip = {
	.irq_ack = ir_ack_apic_edge,
	.irq_set_affinity = amd_ir_set_affinity,
	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg = ir_compose_msi_msg,
};

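/*
 * Create the per-IOMMU remapping irq domain and stack the arch MSI
 * domain on top of it.
 */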
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
	if (!iommu->ir_domain)
		return -ENOMEM;

	iommu->ir_domain->parent = arch_get_ir_parent_domain();
	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	return 0;
}

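/*
 * Update the destination CPU and is_run hint of a guest-mode IRTE.
 * Exported so that the hypervisor can keep posted interrupts pointed
 * at the CPU a vCPU is currently running on.
 */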
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct irq_remap_table *irt;
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	int devid = ir_data->irq_2_irte.devid;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -ENODEV;

	irt = get_irq_table(devid, false);
	if (!irt)
		return -ENODEV;

	spin_lock_irqsave(&irt->lock, flags);

	if (ref->lo.fields_vapic.guest_mode) {
		if (cpu >= 0)
			ref->lo.fields_vapic.destination = cpu;
		ref->lo.fields_vapic.is_run = is_run;
		barrier();
	}

	spin_unlock_irqrestore(&irt->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	return 0;
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif