// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>

static efi_system_table_t efi_systab __initdata;
static u64 efi_systab_phys __initdata;

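/*
 * Architecture-specific configuration tables, matched by GUID when
 * efi_config_init(arch_tables) runs from efi_init().
 */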
static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &uv_systab_phys},
#endif
	{NULL_GUID, NULL, NULL},
};

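/*
 * Physical addresses of the firmware tables we know about; used by
 * efi_is_table_address() to decide whether a physical address belongs
 * to one of them.
 */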
static const unsigned long * const efi_tables[] = {
	&efi.mps,
	&efi.acpi,
	&efi.acpi20,
	&efi.smbios,
	&efi.smbios3,
	&efi.boot_info,
	&efi.hcdp,
	&efi.uga,
#ifdef CONFIG_X86_UV
	&uv_systab_phys,
#endif
	&efi.fw_vendor,
	&efi.runtime,
	&efi.config_table,
	&efi.esrt,
	&efi.properties_table,
	&efi.mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
	&rci2_table_phys,
#endif
};

u64 efi_setup;		/* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

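/*
 * Scan the EFI memory map for EFI_MEMORY_MORE_RELIABLE regions and mark
 * them as mirrored in memblock, then report how much of memory is
 * mirrored.
 */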
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */

static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (efi_soft_reserve_enabled()
			    && (md->attribute & EFI_MEMORY_SP))
				e820_type = E820_TYPE_SOFT_RESERVED;
			else if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_TYPE_RAM;
			else
				e820_type = E820_TYPE_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_TYPE_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_TYPE_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_TYPE_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_TYPE_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_TYPE_RESERVED;
			break;
		}

		e820__range_add(start, size, e820_type);
	}
	e820__update_table(e820_table);
}

/*
 * Given add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return false;

	if (!efi_soft_reserve_enabled())
		return false;

	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY &&
		    (md->attribute & EFI_MEMORY_SP))
			return true;
	return false;
}

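/*
 * Pick up the EFI memory map address from boot_params, set up the early
 * memmap mapping, optionally import the map into the e820 table, and
 * reserve the map itself in memblock so early allocations cannot clobber it.
 */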
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	struct efi_memory_map_data data;
	phys_addr_t pmap;
	int rv;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap =  e->efi_memmap;
#else
	pmap = (e->efi_memmap |	((__u64)e->efi_memmap_hi << 32));
#endif
	data.phys_map		= pmap;
	data.size 		= e->efi_memmap_size;
	data.desc_size		= e->efi_memdesc_size;
	data.desc_version	= e->efi_memdesc_version;

	rv = efi_memmap_init_early(&data);
	if (rv)
		return rv;

	if (add_efi_memmap || do_efi_soft_reserve())
		do_add_efi_memmap();

	efi_fake_memmap_early();

	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);

	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);

	return 0;
}

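/*
 * Helpers for sanitizing the firmware-provided memory map:
 * efi_memmap_entry_valid() flags descriptors whose page count is zero or
 * overflows the 64-bit address space, and efi_clean_memmap() drops such
 * entries and installs the cleaned map.
 */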
#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT		(~(U64_MAX >> 1))

static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
	u64 end_hi = 0;
	char buf[64];

	if (md->num_pages == 0) {
		end = 0;
	} else if (md->num_pages > EFI_PAGES_MAX ||
		   EFI_PAGES_MAX - md->num_pages <
		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
			>> OVERFLOW_ADDR_SHIFT;

		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
			end_hi += 1;
	} else {
		return true;
	}

	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");

	if (end_hi) {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end_hi, end);
	} else {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end);
	}
	return false;
}

static void __init efi_clean_memmap(void)
{
	efi_memory_desc_t *out = efi.memmap.map;
	const efi_memory_desc_t *in = out;
	const efi_memory_desc_t *end = efi.memmap.map_end;
	int i, n_removal;

	for (i = n_removal = 0; in < end; i++) {
		if (efi_memmap_entry_valid(in, i)) {
			if (out != in)
				memcpy(out, in, efi.memmap.desc_size);
			out = (void *)out + efi.memmap.desc_size;
		} else {
			n_removal++;
		}
		in = (void *)in + efi.memmap.desc_size;
	}

	if (n_removal > 0) {
		u64 size = efi.memmap.nr_map - n_removal;

		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
		efi_memmap_install(efi.memmap.phys_map, size);
	}
}

void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;

	for_each_efi_memory_desc(md) {
		char buf[64];

		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}

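/*
 * Map the firmware's EFI system table (32-bit or 64-bit layout), copy the
 * fields of interest into the kernel's efi_systab and verify the header
 * signature.  On 32-bit kernels, any referenced data above 4GB is fatal.
 */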
static int __init efi_systab_init(u64 phys)
{
	int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
					  : sizeof(efi_system_table_32_t);
	bool over4g = false;
	void *p;

	p = early_memremap_ro(phys, size);
	if (p == NULL) {
		pr_err("Couldn't map the system table!\n");
		return -ENOMEM;
	}

	if (efi_enabled(EFI_64BIT)) {
		const efi_system_table_64_t *systab64 = p;

		efi_systab.hdr			= systab64->hdr;
		efi_systab.fw_vendor		= systab64->fw_vendor;
		efi_systab.fw_revision		= systab64->fw_revision;
		efi_systab.con_in_handle	= systab64->con_in_handle;
		efi_systab.con_in		= systab64->con_in;
		efi_systab.con_out_handle	= systab64->con_out_handle;
		efi_systab.con_out		= (void *)(unsigned long)systab64->con_out;
		efi_systab.stderr_handle	= systab64->stderr_handle;
		efi_systab.stderr		= systab64->stderr;
		efi_systab.runtime		= (void *)(unsigned long)systab64->runtime;
		efi_systab.boottime		= (void *)(unsigned long)systab64->boottime;
		efi_systab.nr_tables		= systab64->nr_tables;
		efi_systab.tables		= systab64->tables;

		over4g = systab64->con_in_handle	> U32_MAX ||
			 systab64->con_in		> U32_MAX ||
			 systab64->con_out_handle	> U32_MAX ||
			 systab64->con_out		> U32_MAX ||
			 systab64->stderr_handle	> U32_MAX ||
			 systab64->stderr		> U32_MAX ||
			 systab64->boottime		> U32_MAX;

		if (efi_setup) {
			struct efi_setup_data *data;

			data = early_memremap_ro(efi_setup, sizeof(*data));
			if (!data) {
				early_memunmap(p, size);
				return -ENOMEM;
			}

			efi_systab.fw_vendor	= (unsigned long)data->fw_vendor;
			efi_systab.runtime	= (void *)(unsigned long)data->runtime;
			efi_systab.tables	= (unsigned long)data->tables;

			over4g |= data->fw_vendor	> U32_MAX ||
				  data->runtime		> U32_MAX ||
				  data->tables		> U32_MAX;

			early_memunmap(data, sizeof(*data));
		} else {
			over4g |= systab64->fw_vendor	> U32_MAX ||
				  systab64->runtime	> U32_MAX ||
				  systab64->tables	> U32_MAX;
		}
	} else {
		const efi_system_table_32_t *systab32 = p;

		efi_systab.hdr			= systab32->hdr;
		efi_systab.fw_vendor		= systab32->fw_vendor;
		efi_systab.fw_revision		= systab32->fw_revision;
		efi_systab.con_in_handle	= systab32->con_in_handle;
		efi_systab.con_in		= systab32->con_in;
		efi_systab.con_out_handle	= systab32->con_out_handle;
		efi_systab.con_out		= (void *)(unsigned long)systab32->con_out;
		efi_systab.stderr_handle	= systab32->stderr_handle;
		efi_systab.stderr		= systab32->stderr;
		efi_systab.runtime		= (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime		= (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables		= systab32->nr_tables;
		efi_systab.tables		= systab32->tables;
	}

	early_memunmap(p, size);

	if (IS_ENABLED(CONFIG_X86_32) && over4g) {
		pr_err("EFI data located above 4GB, disabling EFI.\n");
		return -EINVAL;
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	return 0;
}

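/*
 * Main early EFI setup: parse the system table pointed to by boot_params,
 * report the firmware vendor and revision, pick up the configuration
 * tables and decide whether runtime services can be enabled at all.
 */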
void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;

	if (IS_ENABLED(CONFIG_X86_32) &&
	    (boot_params.efi_info.efi_systab_hi ||
	     boot_params.efi_info.efi_memmap_hi)) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}

	efi_systab_phys = boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi << 32);

	if (efi_systab_init(efi_systab_phys))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = early_memremap_ro(efi.systab->fw_vendor,
				sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
	} else {
		pr_err("Could not map the firmware vendor!\n");
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");

	if (!efi_runtime_supported() || efi_runtime_disabled()) {
		efi_memmap_unmap();
		return;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	efi_clean_memmap();

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}

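/*
 * Set or clear NX on an already-mapped region, converting from EFI page
 * granularity to native pages first.
 */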
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;

	/* Make EFI runtime service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}

void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}

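/*
 * Legacy (efi=old_map) mapping: reuse the kernel's direct mapping when the
 * region is already mapped (forcing UC for non-WB memory), otherwise
 * ioremap it, and store the resulting virtual address in the descriptor.
 */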
void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn   = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;

	for_each_efi_memory_desc(md) {
		u64 prev_size;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

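/*
 * If this descriptor covers the physical EFI system table, record the
 * table's new virtual address in efi.systab so it stays reachable after
 * the switch to virtual mode.
 */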
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	unsigned long size;
	u64 end, systab;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = efi_systab_phys;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}

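/*
 * Grow the buffer used to collect the new memory map by one order.  The
 * old buffer is freed in all cases, including allocation failure.
 */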
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;

	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;

	return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return efi.memmap.map;

	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;

	return entry;
}

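/*
 * Decide whether a descriptor needs a virtual mapping for runtime use:
 * runtime regions always do; on 64-bit, boot services regions and (in
 * mixed mode) conventional/loader memory are mapped as well to cope with
 * firmware that touches them, while soft-reserved memory is skipped.
 */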
static bool should_map_region(efi_memory_desc_t *md)
{
	/*
	 * Runtime regions always require runtime mappings (obviously).
	 */
	if (md->attribute & EFI_MEMORY_RUNTIME)
		return true;

	/*
	 * 32-bit EFI doesn't suffer from the bug that requires us to
	 * reserve boot services regions, and mixed mode support
	 * doesn't exist for 32-bit kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	/*
	 * EFI specific purpose memory may be reserved by default
	 * depending on kernel config and boot options.
	 */
	if (md->type == EFI_CONVENTIONAL_MEMORY &&
	    efi_soft_reserve_enabled() &&
	    (md->attribute & EFI_MEMORY_SP))
		return false;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	if (efi_is_mixed()) {
		if (md->type == EFI_CONVENTIONAL_MEMORY ||
		    md->type == EFI_LOADER_DATA ||
		    md->type == EFI_LOADER_CODE)
			return true;
	}

	/*
	 * Map boot services regions as a workaround for buggy
	 * firmware that accesses them even when they shouldn't.
	 *
	 * See efi_{reserve,free}_boot_services().
	 */
	if (md->type == EFI_BOOT_SERVICES_CODE ||
	    md->type == EFI_BOOT_SERVICES_DATA)
		return true;

	return false;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;

	desc_size = efi.memmap.desc_size;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;

		if (!should_map_region(md))
			continue;

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * desc_size), md, desc_size);

		left -= desc_size;
		(*count)++;
	}

	return new_memmap;
}

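/*
 * kexec path: the firmware was already switched to virtual mode by the
 * first kernel, so instead of calling SetVirtualAddressMap() again the
 * regions are remapped at the fixed virtual addresses passed in via
 * setup_data.
 */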
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;

	efi.systab = NULL;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI. With efi=old_map, we don't do runtime services in
	 * kexec kernel because in the initial boot something else might
	 * have been mapped at these virtual addresses.
	 */
	if (efi_is_mixed() || efi_enabled(EFI_OLD_MEMMAP)) {
		efi_memmap_unmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map efi regions which were passed via setup_data. The virt_addr is a
	 * fixed addr which was used in first kernel of a kexec boot.
	 */
	for_each_efi_memory_desc(md) {
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map.
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(efi.memmap.phys_map,
				 efi.memmap.desc_size * efi.memmap.nr_map)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	BUG_ON(!efi.systab);

	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;

	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The old method which used to update that memory descriptor with the
 * virtual address obtained from ioremap() is still supported when the
 * kernel is booted with efi=old_map on its command line. Same old
 * method enabled the runtime services to be called without having to
 * thunk back into physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function arguments passing we do copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * Specially for kexec boot, efi runtime maps in previous kernel should
 * be passed in via setup_data. In that case runtime ranges will be mapped
 * to the same virtual addresses as the first kernel, see
 * kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;
	unsigned long pa;

	efi.systab = NULL;

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		goto err;
	}

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		goto err;
	}

	pa = __pa(new_memmap);

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map that we are about to pass to the
	 * firmware via SetVirtualAddressMap().
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
		pr_err("Failed to remap late EFI memory map\n");
		goto err;
	}

	if (efi_enabled(EFI_DBG)) {
		pr_info("EFI runtime memory map:\n");
		efi_print_memmap();
	}

	if (WARN_ON(!efi.systab))
		goto err;

	if (efi_setup_page_tables(pa, 1 << pg_shift))
		goto err;

	efi_sync_low_kernel_mappings();

	status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
					     efi.memmap.desc_size,
					     efi.memmap.desc_version,
					     (efi_memory_desc_t *)pa);
	if (status != EFI_SUCCESS) {
		pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
		       status);
		goto err;
	}

	efi_free_boot_services();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (!efi_is_mixed())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
	return;

err:
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}

void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();

	efi_dump_pagetable();
}

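/* Handle the x86-specific part of the efi= command line option. */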
static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "old_map"))
		set_bit(EFI_OLD_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);

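/*
 * Return true if @phys_addr is the physical address of one of the
 * firmware tables listed in efi_tables[].
 */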
bool efi_is_table_address(unsigned long phys_addr)
{
	unsigned int i;

	if (phys_addr == EFI_INVALID_TABLE_ADDR)
		return false;

	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
		if (*(efi_tables[i]) == phys_addr)
			return true;

	return false;
}