// SPDX-License-Identifier: GPL-2.0
/*
 * mmconfig-shared.c - Low-level direct PCI config space access via
 *                     MMCONFIG - common code between i386 and x86-64.
 *
 * This code does:
 * - known chipset handling
 * - ACPI decoding and validation
 *
 * Per-architecture code takes care of the mappings and accesses
 * themselves.
 */
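
/*
 * For reference, the MMCONFIG/ECAM layout handled here gives every bus
 * 1MB of config space and every device/function 4KB, so an access works
 * out to roughly:
 *
 *	addr = cfg->address + (bus << 20) + (devfn << 12) + reg;
 *
 * This is only a sketch of the address math; the per-architecture
 * pci_mmcfg_arch_*() helpers do the real mapping and accesses.
 */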

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
static DEFINE_MUTEX(pci_mmcfg_lock);

LIST_HEAD(pci_mmcfg_list);

static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
	if (cfg->res.parent)
		release_resource(&cfg->res);
	list_del(&cfg->list);
	kfree(cfg);
}

static void __init free_all_mmcfg(void)
{
	struct pci_mmcfg_region *cfg, *tmp;

	pci_mmcfg_arch_free();
	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
		pci_mmconfig_remove(cfg);
}

static void list_add_sorted(struct pci_mmcfg_region *new)
{
	struct pci_mmcfg_region *cfg;

	/* keep list sorted by segment and starting bus number */
	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
		if (cfg->segment > new->segment ||
		    (cfg->segment == new->segment &&
		     cfg->start_bus >= new->start_bus)) {
			list_add_tail_rcu(&new->list, &cfg->list);
			return;
		}
	}
	list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}

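/*
 * Allocate and initialize a pci_mmcfg_region for buses start..end of the
 * given segment, rooted at MMIO address addr.  Each bus takes 1MB of
 * config space, so e.g. a region for buses 0x00-0x3f spans 64MB.  The
 * region is not added to pci_mmcfg_list here.
 */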
static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
						   int end, u64 addr)
{
	struct pci_mmcfg_region *new;
	struct resource *res;

	if (addr == 0)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->address = addr;
	new->segment = segment;
	new->start_bus = start;
	new->end_bus = end;

	res = &new->res;
	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
	res->name = new->name;

	return new;
}

static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
							int end, u64 addr)
{
	struct pci_mmcfg_region *new;

	new = pci_mmconfig_alloc(segment, start, end, addr);
	if (new) {
		mutex_lock(&pci_mmcfg_lock);
		list_add_sorted(new);
		mutex_unlock(&pci_mmcfg_lock);

		pr_info(PREFIX
		       "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
		       "(base %#lx)\n",
		       segment, start, end, &new->res, (unsigned long)addr);
	}

	return new;
}

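/*
 * Find the region covering (segment, bus).  The list is walked with the
 * RCU list primitives, so callers need RCU read-side protection or must
 * hold pci_mmcfg_lock.
 */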
struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}

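/*
 * Known-chipset probe for the Intel E7520 MCH: read the MMCONFIG window
 * base from config offset 0xce of device 00:00.0 and, if it looks sane,
 * register a region for segment 0, buses 0-255.
 */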
static const char *__init pci_mmcfg_e7520(void)
{
	u32 win;
	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);

	win = win & 0xf000;
	if (win == 0x0000 || win == 0xf000)
		return NULL;

	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
		return NULL;

	return "Intel Corporation E7520 Memory Controller Hub";
}

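/*
 * Known-chipset probe for the Intel 945 family: decode the PCI Express
 * BAR at config offset 0x48 (enable bit, window size, base), reject
 * misaligned or suspicious bases, and register buses 0..(size>>20)-1.
 */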
static const char *__init pci_mmcfg_intel_945(void)
{
	u32 pciexbar, mask = 0, len = 0;

	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);

	/* Enable bit */
	if (!(pciexbar & 1))
		return NULL;

	/* Size bits */
	switch ((pciexbar >> 1) & 3) {
	case 0:
		mask = 0xf0000000U;
		len  = 0x10000000U;
		break;
	case 1:
		mask = 0xf8000000U;
		len  = 0x08000000U;
		break;
	case 2:
		mask = 0xfc000000U;
		len  = 0x04000000U;
		break;
	default:
		return NULL;
	}

	/*
	 * Errata #2: things break when not aligned on a 256MB boundary.
	 * This can only happen in 64M/128M mode.
	 */

	if ((pciexbar & mask) & 0x0fffffffU)
		return NULL;

	/* Don't hit the APIC registers and their friends */
	if ((pciexbar & mask) >= 0xf0000000U)
		return NULL;

	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
		return NULL;

	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

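/*
 * Known-chipset probe for AMD Family 10h: read MSR_FAM10H_MMIO_CONF_BASE
 * and, if the MMCONFIG enable bit is set, register one region per
 * segment, each covering up to 256 buses.
 */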
static const char *__init pci_mmcfg_amd_fam10h(void)
{
	u32 low, high, address;
	u64 base, msr;
	int i;
	unsigned segnbits = 0, busnbits, end_bus;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	if (rdmsr_safe(address, &low, &high))
		return NULL;

	msr = high;
	msr <<= 32;
	msr |= low;

	/* MMCONFIG is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	/*
	 * A bus range of zero would cover only bus 0, which is not
	 * useful, so skip it.
	 */
	if (!busnbits)
		return NULL;

	if (busnbits > 8) {
		segnbits = busnbits - 8;
		busnbits = 8;
	}

	end_bus = (1 << busnbits) - 1;
	for (i = 0; i < (1 << segnbits); i++)
		if (pci_mmconfig_add(i, 0, end_bus,
				     base + (1<<28) * i) == NULL) {
			free_all_mmcfg();
			return NULL;
		}

	return "AMD Family 10h NB";
}

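/*
 * Known-chipset probe for the nVidia MCP55: scan every bus for the
 * 10de:0369 host bridge, decode its extended config register at offset
 * 0x90 (enable bit, starting bus, bus count, base) and register one
 * region per bridge found.  mcp55_checked makes the scan run only once.
 */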
static bool __initdata mcp55_checked;
static const char *__init pci_mmcfg_nvidia_mcp55(void)
{
	int bus;
	int mcp55_mmconf_found = 0;

	static const u32 extcfg_regnum __initconst	= 0x90;
	static const u32 extcfg_regsize __initconst	= 4;
	static const u32 extcfg_enable_mask __initconst	= 1 << 31;
	static const u32 extcfg_start_mask __initconst	= 0xff << 16;
	static const int extcfg_start_shift __initconst	= 16;
	static const u32 extcfg_size_mask __initconst	= 0x3 << 28;
	static const int extcfg_size_shift __initconst	= 28;
	static const int extcfg_sizebus[] __initconst	= {
		0x100, 0x80, 0x40, 0x20
	};
	static const u32 extcfg_base_mask[] __initconst	= {
		0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
	};
	static const int extcfg_base_lshift __initconst	= 25;

	/*
	 * Skip if ACPI is in use, if another probe (e.g. AMD Fam10h)
	 * already registered regions, or if we have checked before.
	 */
	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
		return NULL;

	mcp55_checked = true;
	for (bus = 0; bus < 256; bus++) {
		u64 base;
		u32 l, extcfg;
		u16 vendor, device;
		int start, size_index, end;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
			continue;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
				  extcfg_regsize, &extcfg);

		if (!(extcfg & extcfg_enable_mask))
			continue;

		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
		base = extcfg & extcfg_base_mask[size_index];
		/* base could be above 4G */
		base <<= extcfg_base_lshift;
		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
		end = start + extcfg_sizebus[size_index] - 1;
		if (pci_mmconfig_add(0, start, end, base) == NULL)
			continue;
		mcp55_mmconf_found++;
	}

	if (!mcp55_mmconf_found)
		return NULL;

	return "nVidia MCP55";
}

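/*
 * Host bridges whose MMCONFIG setup can be read back directly from the
 * hardware.  These are probed first; the ACPI MCFG table is only parsed
 * when none of them matches.
 */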
struct pci_mmcfg_hostbridge_probe {
	u32 bus;
	u32 devfn;
	u32 vendor;
	u32 device;
	const char *(*probe)(void);
};

static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
	{ 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
	  0x0369, pci_mmcfg_nvidia_mcp55 },
};

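/*
 * Clamp bogus bus ranges: a region whose end_bus is below its start_bus
 * is extended to bus 255, and a region overlapping its successor in the
 * sorted list is trimmed to end just before it.
 */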
static void __init pci_mmcfg_check_end_bus_number(void)
{
	struct pci_mmcfg_region *cfg, *cfgx;

	/* Fixup overlaps */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->end_bus < cfg->start_bus)
			cfg->end_bus = 255;

		/* Don't access the list head ! */
		if (cfg->list.next == &pci_mmcfg_list)
			break;

		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
		if (cfg->end_bus >= cfgx->start_bus)
			cfg->end_bus = cfgx->start_bus - 1;
	}
}

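/*
 * Run the known host bridge probes above.  Returns nonzero when at least
 * one probe registered an MMCONFIG region.
 */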
static int __init pci_mmcfg_check_hostbridge(void)
{
	u32 l;
	u32 bus, devfn;
	u16 vendor, device;
	int i;
	const char *name;

	if (!raw_pci_ops)
		return 0;

	free_all_mmcfg();

	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
		bus =  pci_mmcfg_probes[i].bus;
		devfn = pci_mmcfg_probes[i].devfn;
		raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		name = NULL;
		if (pci_mmcfg_probes[i].vendor == vendor &&
		    pci_mmcfg_probes[i].device == device)
			name = pci_mmcfg_probes[i].probe();

		if (name)
			pr_info(PREFIX "%s with MMCONFIG support\n", name);
	}

	/* Some firmware reports a bogus end_bus_number, fix it up */
	pci_mmcfg_check_end_bus_number();

	return !list_empty(&pci_mmcfg_list);
}

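/*
 * acpi_walk_resources() callback: flag the MMCONFIG range in *data as
 * reserved when it is fully contained in this device's fixed-memory or
 * 32/64-bit address space resource.
 */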
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
{
	struct resource *mcfg_res = data;
	struct acpi_resource_address64 address;
	acpi_status status;

	if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32 =
			&res->data.fixed_memory32;
		if (!fixmem32)
			return AE_OK;
		if ((mcfg_res->start >= fixmem32->address) &&
		    (mcfg_res->end < (fixmem32->address +
				      fixmem32->address_length))) {
			mcfg_res->flags = 1;
			return AE_CTRL_TERMINATE;
		}
	}
	if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
	    (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
		return AE_OK;

	status = acpi_resource_to_address64(res, &address);
	if (ACPI_FAILURE(status) ||
	   (address.address.address_length <= 0) ||
	   (address.resource_type != ACPI_MEMORY_RANGE))
		return AE_OK;

	if ((mcfg_res->start >= address.address.minimum) &&
	    (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
		mcfg_res->flags = 1;
		return AE_CTRL_TERMINATE;
	}
	return AE_OK;
}

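/*
 * acpi_get_devices() callback: walk a motherboard device's _CRS and stop
 * the namespace walk as soon as the MMCONFIG range is found reserved.
 */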
static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
					void *context, void **rv)
{
	struct resource *mcfg_res = context;

	acpi_walk_resources(handle, METHOD_NAME__CRS,
			    check_mcfg_resource, context);

	if (mcfg_res->flags)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

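/*
 * Return true when [start, end) is fully covered by a PNP0C01/PNP0C02
 * motherboard resource in the ACPI namespace.
 */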
static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used)
{
	struct resource mcfg_res;

	mcfg_res.start = start;
	mcfg_res.end = end - 1;
	mcfg_res.flags = 0;

	acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);

	if (!mcfg_res.flags)
		acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
				 NULL);

	return mcfg_res.flags;
}

typedef bool (*check_reserved_t)(u64 start, u64 end, unsigned type);

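/*
 * Check that the MMCONFIG region is marked reserved by the firmware
 * (ACPI motherboard resources or the E820 map).  If only a leading part
 * of at least 16MB is reserved, shrink the region and its bus range to
 * that portion instead of rejecting it outright.
 */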
static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
				     struct pci_mmcfg_region *cfg,
				     struct device *dev, int with_e820)
{
	u64 addr = cfg->res.start;
	u64 size = resource_size(&cfg->res);
	u64 old_size = size;
	int num_buses;
	char *method = with_e820 ? "E820" : "ACPI motherboard resources";

	while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
		size >>= 1;
		if (size < (16UL<<20))
			break;
	}

	if (size < (16UL<<20) && size != old_size)
		return 0;

	if (dev)
		dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
			 &cfg->res, method);
	else
		pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
		       &cfg->res, method);

	if (old_size != size) {
		/* update end_bus */
		cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
		num_buses = cfg->end_bus - cfg->start_bus + 1;
		cfg->res.end = cfg->res.start +
		    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
		snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
			 "PCI MMCONFIG %04x [bus %02x-%02x]",
			 cfg->segment, cfg->start_bus, cfg->end_bus);

		if (dev)
			dev_info(dev,
				"MMCONFIG "
				"at %pR (base %#lx) (size reduced!)\n",
				&cfg->res, (unsigned long) cfg->address);
		else
			pr_info(PREFIX
				"MMCONFIG for %04x [bus%02x-%02x] "
				"at %pR (base %#lx) (size reduced!)\n",
				cfg->segment, cfg->start_bus, cfg->end_bus,
				&cfg->res, (unsigned long) cfg->address);
	}

	return 1;
}

static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
	if (!early && !acpi_disabled) {
		if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
			return 1;

		if (dev)
			dev_info(dev, FW_INFO
				 "MMCONFIG at %pR not reserved in "
				 "ACPI motherboard resources\n",
				 &cfg->res);
		else
			pr_info(FW_INFO PREFIX
			       "MMCONFIG at %pR not reserved in "
			       "ACPI motherboard resources\n",
			       &cfg->res);
	}

	/*
	 * e820__mapped_all() is marked as __init.
	 * All entries from ACPI MCFG table have been checked at boot time.
	 * For MCFG information constructed from hotpluggable host bridge's
	 * _CBA method, just assume it's reserved.
	 */
	if (pci_mmcfg_running_state)
		return 1;

	/*
	 * Don't try to do this check unless configuration type 1
	 * is available.  What about type 2?
	 */
	if (raw_pci_ops)
		return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);

	return 0;
}

static void __init pci_mmcfg_reject_broken(int early)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
			pr_info(PREFIX "not using MMCONFIG\n");
			free_all_mmcfg();
			return;
		}
	}
}

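/*
 * Sanity-check one MCFG allocation entry.  Entries above 4GB are only
 * accepted from SGI platforms or from BIOSes dated 2010 or later that
 * report an MCFG revision >= 1; everything else is rejected.
 */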
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
					struct acpi_mcfg_allocation *cfg)
{
	if (cfg->address < 0xFFFFFFFF)
		return 0;

	if (!strncmp(mcfg->header.oem_id, "SGI", 3))
		return 0;

	if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010))
		return 0;

	pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
	       "is above 4GB, ignored\n", cfg->pci_segment,
	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
	return -EINVAL;
}

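/*
 * Parse the ACPI MCFG table: count the allocation entries following the
 * header and register one MMCONFIG region for each of them.
 */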
static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
	struct acpi_table_mcfg *mcfg;
	struct acpi_mcfg_allocation *cfg_table, *cfg;
	unsigned long i;
	int entries;

	if (!header)
		return -EINVAL;

	mcfg = (struct acpi_table_mcfg *)header;

	/* how many config structures do we have */
	free_all_mmcfg();
	entries = 0;
	i = header->length - sizeof(struct acpi_table_mcfg);
	while (i >= sizeof(struct acpi_mcfg_allocation)) {
		entries++;
		i -= sizeof(struct acpi_mcfg_allocation);
	}
	if (entries == 0) {
		pr_err(PREFIX "MMCONFIG has no entries\n");
		return -ENODEV;
	}

	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
	for (i = 0; i < entries; i++) {
		cfg = &cfg_table[i];
		if (acpi_mcfg_check_entry(mcfg, cfg)) {
			free_all_mmcfg();
			return -ENODEV;
		}

		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
				   cfg->end_bus_number, cfg->address) == NULL) {
			pr_warn(PREFIX "no memory for MCFG entries\n");
			free_all_mmcfg();
			return -ENOMEM;
		}
	}

	return 0;
}

#ifdef CONFIG_ACPI_APEI
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
				     void *data), void *data);

static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
				     void *data), void *data)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		rc = func(cfg->res.start, resource_size(&cfg->res), data);
		if (rc)
			return rc;
	}

	return 0;
}
#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
#else
#define set_apei_filter()
#endif

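/*
 * Common tail of early/late init: drop regions that are not properly
 * reserved, derive pcibios_last_bus from the segment 0 regions when it
 * is not already set, then hand the surviving regions to the
 * per-architecture mapping code.
 */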
static void __init __pci_mmcfg_init(int early)
{
	pci_mmcfg_reject_broken(early);
	if (list_empty(&pci_mmcfg_list))
		return;

	if (pcibios_last_bus < 0) {
		const struct pci_mmcfg_region *cfg;

		list_for_each_entry(cfg, &pci_mmcfg_list, list) {
			if (cfg->segment)
				break;
			pcibios_last_bus = cfg->end_bus;
		}
	}

	if (pci_mmcfg_arch_init())
		pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
	else {
		free_all_mmcfg();
		pci_mmcfg_arch_init_failed = true;
	}
}

static int __initdata known_bridge;

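/*
 * Early init runs before the PCI subsystem is up: try the known host
 * bridge probes first and fall back to the MCFG table.  Late init
 * (below) tries MCFG again if MMCONFIG was not successfully enabled
 * during early init, unless a known bridge already provided the
 * configuration.
 */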
void __init pci_mmcfg_early_init(void)
{
	if (pci_probe & PCI_PROBE_MMCONF) {
		if (pci_mmcfg_check_hostbridge())
			known_bridge = 1;
		else
			acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
		__pci_mmcfg_init(1);

		set_apei_filter();
	}
}

void __init pci_mmcfg_late_init(void)
{
	/* MMCONFIG disabled */
	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return;

	if (known_bridge)
		return;

	/* MMCONFIG hasn't been enabled yet, try again */
	if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
		acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
		__pci_mmcfg_init(0);
	}
}

static int __init pci_mmcfg_late_insert_resources(void)
{
	struct pci_mmcfg_region *cfg;

	pci_mmcfg_running_state = true;

	/* If we are not using MMCONFIG, don't insert the resources. */
	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 1;

	/*
	 * Attempt to insert the mmcfg resources but not with the busy flag
	 * marked so it won't cause request errors when __request_region is
	 * called.
	 */
	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (!cfg->res.parent)
			insert_resource(&iomem_resource, &cfg->res);

	return 0;
}

/*
 * Perform MMCONFIG resource insertion after PCI initialization to allow for
 * misprogrammed MCFG tables that state larger sizes but actually conflict
 * with other system resources.
 */
late_initcall(pci_mmcfg_late_insert_resources);

/* Add MMCFG information for host bridges */
int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
			phys_addr_t addr)
{
	int rc;
	struct resource *tmp = NULL;
	struct pci_mmcfg_region *cfg;

	if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
		return -ENODEV;

	if (start > end)
		return -EINVAL;

	mutex_lock(&pci_mmcfg_lock);
	cfg = pci_mmconfig_lookup(seg, start);
	if (cfg) {
		if (cfg->end_bus < end)
			dev_info(dev, FW_INFO
				 "MMCONFIG for "
				 "domain %04x [bus %02x-%02x] "
				 "only partially covers this bridge\n",
				  cfg->segment, cfg->start_bus, cfg->end_bus);
		mutex_unlock(&pci_mmcfg_lock);
		return -EEXIST;
	}

	if (!addr) {
		mutex_unlock(&pci_mmcfg_lock);
		return -EINVAL;
	}

	rc = -EBUSY;
	cfg = pci_mmconfig_alloc(seg, start, end, addr);
	if (cfg == NULL) {
		dev_warn(dev, "failed to add MMCONFIG (out of memory)\n");
		rc = -ENOMEM;
	} else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
		dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
			 &cfg->res);
	} else {
		/* Insert resource if it's not in boot stage */
		if (pci_mmcfg_running_state)
			tmp = insert_resource_conflict(&iomem_resource,
						       &cfg->res);

		if (tmp) {
			dev_warn(dev,
				 "MMCONFIG %pR conflicts with "
				 "%s %pR\n",
				 &cfg->res, tmp->name, tmp);
		} else if (pci_mmcfg_arch_map(cfg)) {
			dev_warn(dev, "failed to map MMCONFIG %pR.\n",
				 &cfg->res);
		} else {
			list_add_sorted(cfg);
			dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
				 &cfg->res, (unsigned long)addr);
			cfg = NULL;
			rc = 0;
		}
	}

	if (cfg) {
		if (cfg->res.parent)
			release_resource(&cfg->res);
		kfree(cfg);
	}

	mutex_unlock(&pci_mmcfg_lock);

	return rc;
}

/* Delete MMCFG information for host bridges */
int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
{
	struct pci_mmcfg_region *cfg;

	mutex_lock(&pci_mmcfg_lock);
	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == seg && cfg->start_bus == start &&
		    cfg->end_bus == end) {
			list_del_rcu(&cfg->list);
			synchronize_rcu();
			pci_mmcfg_arch_unmap(cfg);
			if (cfg->res.parent)
				release_resource(&cfg->res);
			mutex_unlock(&pci_mmcfg_lock);
			kfree(cfg);
			return 0;
		}
	mutex_unlock(&pci_mmcfg_lock);

	return -ENOENT;
}