/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
u32 amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};
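
/*
 * In the usual successful initialization the code below advances through
 * these states in enum order up to IOMMU_INITIALIZED; IOMMU_NOT_FOUND and
 * IOMMU_INIT_ERROR are terminal error states (see state_next()).
 */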

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
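
/*
 * Worked example (illustrative values): with 4k pages and
 * amd_iommu_last_bdf == 0xffff, a 32-byte table entry gives
 * (0xffff + 1) * 32 == 2 MB, so tbl_size(32) evaluates to
 * 1UL << (PAGE_SHIFT + get_order(2 MB)) == 2 MB - the table size rounded
 * up to a power-of-two number of pages.
 */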

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
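
/*
 * The four helpers above use the usual PCI indirect-access pattern: one
 * config-space dword (0xf8 for L1, 0xf0 for L2) selects the internal
 * register - with bit 31 resp. bit 8 acting as the write-enable - and the
 * following dword (0xfc resp. 0xf4) carries the data. For example (values
 * illustrative only), iommu_write_l1(iommu, 3, 0x10, val) stores val into
 * register 0x10 of L1 block 3.
 */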

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
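
/*
 * Example (illustrative): the low bits of the device table base register
 * encode the table size in 4k pages minus one. A full 2 MB device table
 * (64k entries of 32 bytes each) is thus programmed as
 * (0x200000 >> 12) - 1 == 511.
 */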

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
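
/*
 * Example (illustrative): the entry length is encoded in the two top bits
 * of the type byte. For IVHD_DEV_ALIAS (0x42), 0x42 >> 6 == 1, so the
 * entry is 0x04 << 1 == 8 bytes long; types below 0x40 yield 4-byte
 * entries.
 */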

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
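
/*
 * Checksum example (illustrative): the firmware chooses the IVRS checksum
 * byte so that the 8-bit sum of all table bytes is zero. If the remaining
 * bytes sum to 0x37, the checksum field must hold 0xc9, since
 * (0x37 + 0xc9) & 0xff == 0.
 */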

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will place the PPR log entries */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
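
/*
 * Example (illustrative): a device table entry is four 64-bit words, and a
 * bit number selects the word via (bit >> 6) and the position via
 * (bit & 0x3f). DEV_ENTRY_SYSMGT1 (0x68) therefore lives in data[1] at bit
 * position 0x28, while DEV_ENTRY_VALID (0x00) is bit 0 of data[0].
 */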


void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;


	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS_NUM(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index             = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;
	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}


static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}
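
/*
 * Example (illustrative): the counter configuration register packs the
 * number of counter banks into bits [17:12] and the counters per bank into
 * bits [10:7], so a raw value of 0x00004200 decodes to max_banks == 4 and
 * max_counters == 4.
 */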

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);


	iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
				PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi:  Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	ret = amd_iommu_init_api();

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;
	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
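
/*
 * Example (illustrative): a 16-bit device id packs bus[15:8], dev[7:3] and
 * fn[2:0], so bus 0, device 0x14, function 0 - the SB IOAPIC above -
 * encodes to 0x00a0.
 */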

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
				fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time.  This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;
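
	/*
	 * Sizing note (illustrative; assumes MAX_DOMAIN_ID == 65536 as
	 * defined in amd_iommu_types.h): one bit per protection domain
	 * makes MAX_DOMAIN_ID/8 == 8 KiB, i.e. an order-1 allocation.
	 */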

	/*
	 * Let all alias entries point to themselves.
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
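
/*
 * On the error-free path the state machine below advances through:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND and IOMMU_INIT_ERROR are terminal error states; once
 * one of them is reached, iommu_go_to_state() stops stepping and
 * returns the error.
 */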

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_on_init_error();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table is present to
 * detect AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
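
/*
 * Illustrative kernel command line usage for the options parsed above
 * (semantics as implemented in this file):
 *
 *	amd_iommu=fullflush       - flush IO/TLB entries on every unmap
 *	amd_iommu=off             - do not initialize the AMD IOMMU driver
 *	amd_iommu=force_isolation - force device isolation for all devices
 */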

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
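
/*
 * Worked example (illustrative): ivrs_ioapic[32]=00:14.0 maps IOAPIC
 * id 32 to PCI device 00:14.0, i.e.
 * devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0xa0.
 * Likewise, ivrs_hpet[0]=00:14.0 maps HPET id 0 to the same devid.
 */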

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support. This code provides access to
 * the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_banks;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_counters;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
				    u64 *value, bool is_write)
{
	struct amd_iommu *iommu;
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Locate the iommu associated with the device ID */
	iommu = amd_iommu_rlookup_table[devid];

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
				(iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		writel((u32)*value, iommu->mmio_base + offset);
		writel((*value >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		/* OR in the low half; '=' here would discard the high word */
		*value |= readl(iommu->mmio_base + offset);
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
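
/*
 * A minimal usage sketch for the counter API above (illustrative only;
 * the devid is a hypothetical example):
 *
 *	u64 value;
 *	u16 devid = 0xa0;
 *
 *	if (amd_iommu_pc_supported() &&
 *	    amd_iommu_pc_get_max_banks(devid) > 0 &&
 *	    !amd_iommu_pc_get_set_reg_val(devid, 0, 0, 0, &value, false))
 *		pr_info("AMD-Vi: PC bank 0 counter 0: 0x%llx\n", value);
 *
 * For bank 0, counter 0, fxn 0 the MMIO offset computed above is
 * ((0x40 | 0) << 12) | (0 << 8) | 0 = 0x40000, i.e. the start of the
 * counter register aperture (MMIO_CNTR_REG_OFFSET).
 */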