// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * EFI Runtime Services are not implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>

static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &uv_systab_phys},
#endif
	{NULL_GUID, NULL, NULL},
};

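/*
 * Physical addresses of the EFI configuration tables that
 * efi_is_table_address() recognizes.
 */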
static const unsigned long * const efi_tables[] = {
	&efi.mps,
	&efi.acpi,
	&efi.acpi20,
	&efi.smbios,
	&efi.smbios3,
	&efi.boot_info,
	&efi.hcdp,
	&efi.uga,
#ifdef CONFIG_X86_UV
	&uv_systab_phys,
#endif
	&efi.fw_vendor,
	&efi.runtime,
	&efi.config_table,
	&efi.esrt,
	&efi.properties_table,
	&efi.mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
	&rci2_table_phys,
#endif
};

u64 efi_setup;		/* efi setup_data physical address */

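/*
 * "add_efi_memmap" boot parameter: also feed the entries of the EFI
 * memory map into the kernel's e820 table (see do_add_efi_memmap()).
 */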
static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

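/*
 * Call SetVirtualAddressMap() while the firmware is still running in
 * physical mode: switch to the page tables set up by
 * efi_call_phys_prolog() and keep interrupts disabled around the call.
 */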
static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	pgd_t *save_pgd;

	save_pgd = efi_call_phys_prolog();
	if (!save_pgd)
		return EFI_ABORTED;

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_call_phys_epilog(save_pgd);

	return status;
}

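/*
 * Mark all EFI_MEMORY_MORE_RELIABLE regions as mirrored in memblock and
 * report how much of the memory is mirrored.
 */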
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */

static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (efi_soft_reserve_enabled()
			    && (md->attribute & EFI_MEMORY_SP))
				e820_type = E820_TYPE_SOFT_RESERVED;
			else if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_TYPE_RAM;
			else
				e820_type = E820_TYPE_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_TYPE_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_TYPE_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_TYPE_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_TYPE_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_TYPE_RESERVED;
			break;
		}

		e820__range_add(start, size, e820_type);
	}
	e820__update_table(e820_table);
}

/*
 * Given add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return false;

	if (!efi_soft_reserve_enabled())
		return false;

	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY &&
		    (md->attribute & EFI_MEMORY_SP))
			return true;
	return false;
}

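/*
 * Register the EFI memory map passed in boot_params, optionally add its
 * entries to the e820 table, and reserve the memory map buffer itself in
 * memblock so that it is not handed out for other use.
 */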
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	struct efi_memory_map_data data;
	phys_addr_t pmap;
	int rv;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap =  e->efi_memmap;
#else
	pmap = (e->efi_memmap |	((__u64)e->efi_memmap_hi << 32));
#endif
	data.phys_map		= pmap;
	data.size		= e->efi_memmap_size;
	data.desc_size		= e->efi_memdesc_size;
	data.desc_version	= e->efi_memdesc_version;

	rv = efi_memmap_init_early(&data);
	if (rv)
		return rv;

	if (add_efi_memmap || do_efi_soft_reserve())
		do_add_efi_memmap();

	efi_fake_memmap_early();

	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);

	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);

	return 0;
}

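/*
 * Helpers to sanity-check the EFI memory map: efi_memmap_entry_valid()
 * rejects zero-sized ranges and ranges whose end address overflows
 * 64 bits, and efi_clean_memmap() drops such entries from the map.
 */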
#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT		(~(U64_MAX >> 1))

static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
	u64 end_hi = 0;
	char buf[64];

	if (md->num_pages == 0) {
		end = 0;
	} else if (md->num_pages > EFI_PAGES_MAX ||
		   EFI_PAGES_MAX - md->num_pages <
		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
			>> OVERFLOW_ADDR_SHIFT;

		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
			end_hi += 1;
	} else {
		return true;
	}

	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");

	if (end_hi) {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end_hi, end);
	} else {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end);
	}
	return false;
}

static void __init efi_clean_memmap(void)
{
	efi_memory_desc_t *out = efi.memmap.map;
	const efi_memory_desc_t *in = out;
	const efi_memory_desc_t *end = efi.memmap.map_end;
	int i, n_removal;

	for (i = n_removal = 0; in < end; i++) {
		if (efi_memmap_entry_valid(in, i)) {
			if (out != in)
				memcpy(out, in, efi.memmap.desc_size);
			out = (void *)out + efi.memmap.desc_size;
		} else {
			n_removal++;
		}
		in = (void *)in + efi.memmap.desc_size;
	}

	if (n_removal > 0) {
		u64 size = efi.memmap.nr_map - n_removal;

		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
		efi_memmap_install(efi.memmap.phys_map, size);
	}
}

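/* Dump the EFI memory map: type, attributes, physical range and size. */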
void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;

	for_each_efi_memory_desc(md) {
		char buf[64];

		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}

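/*
 * Copy the 32-bit or 64-bit firmware system table into the kernel's
 * generic efi_system_table_t and verify its signature. On kexec boots
 * the pointers saved in setup_data (efi_setup) take precedence over the
 * values read from the firmware table.
 */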
static int __init efi_systab_init(void *phys)
{
	if (efi_enabled(EFI_64BIT)) {
		efi_system_table_64_t *systab64;
		struct efi_setup_data *data = NULL;
		u64 tmp = 0;

		if (efi_setup) {
			data = early_memremap(efi_setup, sizeof(*data));
			if (!data)
				return -ENOMEM;
		}
		systab64 = early_memremap((unsigned long)phys,
					 sizeof(*systab64));
		if (systab64 == NULL) {
			pr_err("Couldn't map the system table!\n");
			if (data)
				early_memunmap(data, sizeof(*data));
			return -ENOMEM;
		}

		efi_systab.hdr = systab64->hdr;
		efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
					      systab64->fw_vendor;
		tmp |= data ? data->fw_vendor : systab64->fw_vendor;
		efi_systab.fw_revision = systab64->fw_revision;
		efi_systab.con_in_handle = systab64->con_in_handle;
		tmp |= systab64->con_in_handle;
		efi_systab.con_in = systab64->con_in;
		tmp |= systab64->con_in;
		efi_systab.con_out_handle = systab64->con_out_handle;
		tmp |= systab64->con_out_handle;
		efi_systab.con_out = systab64->con_out;
		tmp |= systab64->con_out;
		efi_systab.stderr_handle = systab64->stderr_handle;
		tmp |= systab64->stderr_handle;
		efi_systab.stderr = systab64->stderr;
		tmp |= systab64->stderr;
		efi_systab.runtime = data ?
				     (void *)(unsigned long)data->runtime :
				     (void *)(unsigned long)systab64->runtime;
		tmp |= data ? data->runtime : systab64->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
		tmp |= systab64->boottime;
		efi_systab.nr_tables = systab64->nr_tables;
		efi_systab.tables = data ? (unsigned long)data->tables :
					   systab64->tables;
		tmp |= data ? data->tables : systab64->tables;

		early_memunmap(systab64, sizeof(*systab64));
		if (data)
			early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
		if (tmp >> 32) {
			pr_err("EFI data located above 4GB, disabling EFI.\n");
			return -EINVAL;
		}
#endif
	} else {
		efi_system_table_32_t *systab32;

		systab32 = early_memremap((unsigned long)phys,
					 sizeof(*systab32));
		if (systab32 == NULL) {
			pr_err("Couldn't map the system table!\n");
			return -ENOMEM;
		}

		efi_systab.hdr = systab32->hdr;
		efi_systab.fw_vendor = systab32->fw_vendor;
		efi_systab.fw_revision = systab32->fw_revision;
		efi_systab.con_in_handle = systab32->con_in_handle;
		efi_systab.con_in = systab32->con_in;
		efi_systab.con_out_handle = systab32->con_out_handle;
		efi_systab.con_out = systab32->con_out;
		efi_systab.stderr_handle = systab32->stderr_handle;
		efi_systab.stderr = systab32->stderr;
		efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables = systab32->nr_tables;
		efi_systab.tables = systab32->tables;

		early_memunmap(systab32, sizeof(*systab32));
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	return 0;
}

static int __init efi_runtime_init32(void)
{
	efi_runtime_services_32_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
			sizeof(efi_runtime_services_32_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_32_t));

	return 0;
}

static int __init efi_runtime_init64(void)
{
	efi_runtime_services_64_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
			sizeof(efi_runtime_services_64_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_64_t));

	return 0;
}

static int __init efi_runtime_init(void)
{
	int rv;

	/*
	 * Check out the runtime services table. We need to map
	 * the runtime services table so that we can grab the physical
	 * address of several of the EFI runtime functions, needed to
	 * set the firmware into virtual mode.
	 *
	 * When EFI_PARAVIRT is in force we cannot map the runtime
	 * service memory region because we do not have direct access to
	 * it. However, runtime services are available through proxy
	 * functions (e.g. the Xen dom0 EFI implementation issues a
	 * special hypercall which executes the relevant EFI function),
	 * and that is why they are always enabled.
	 */

	if (!efi_enabled(EFI_PARAVIRT)) {
		if (efi_enabled(EFI_64BIT))
			rv = efi_runtime_init64();
		else
			rv = efi_runtime_init32();

		if (rv)
			return rv;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;
}

void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;
	void *tmp;

#ifdef CONFIG_X86_32
	if (boot_params.efi_info.efi_systab_hi ||
	    boot_params.efi_info.efi_memmap_hi) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}
	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
	efi_phys.systab = (efi_system_table_t *)
			  (boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

	if (efi_systab_init(efi_phys.systab))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	} else
		pr_err("Could not map the firmware vendor!\n");
	early_memunmap(tmp, 2);

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	else {
		if (efi_runtime_disabled() || efi_runtime_init()) {
			efi_memmap_unmap();
			return;
		}
	}

	efi_clean_memmap();

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}

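/* Set or clear NX on the virtual mapping of a runtime region. */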
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;

	/* Make EFI runtime service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}

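/* Switch the mapping of the given range to uncached (UC). */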
void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}

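/*
 * efi=old_map style mapping: reuse the kernel's direct mapping if the
 * range is already mapped (making it UC if it is not write-back),
 * otherwise ioremap() it, and record the resulting virtual address in
 * the descriptor.
 */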
void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn   = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;

	for_each_efi_memory_desc(md) {
		u64 prev_size;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	unsigned long size;
	u64 end, systab;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = (u64)(unsigned long)efi_phys.systab;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}

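/*
 * Double the buffer used for collecting the relocated memory map. The
 * old buffer, if any, is copied over and is always freed.
 */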
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;

	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;

	return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return efi.memmap.map;

	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;

	return entry;
}

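/* Decide whether a memory descriptor needs a runtime virtual mapping. */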
static bool should_map_region(efi_memory_desc_t *md)
{
	/*
	 * Runtime regions always require runtime mappings (obviously).
	 */
	if (md->attribute & EFI_MEMORY_RUNTIME)
		return true;

	/*
	 * 32-bit EFI doesn't suffer from the bug that requires us to
	 * reserve boot services regions, and mixed mode support
	 * doesn't exist for 32-bit kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	/*
	 * EFI specific purpose memory may be reserved by default
	 * depending on kernel config and boot options.
	 */
	if (md->type == EFI_CONVENTIONAL_MEMORY &&
	    efi_soft_reserve_enabled() &&
	    (md->attribute & EFI_MEMORY_SP))
		return false;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
		if (md->type == EFI_CONVENTIONAL_MEMORY ||
		    md->type == EFI_LOADER_DATA ||
		    md->type == EFI_LOADER_CODE)
			return true;
	}

	/*
	 * Map boot services regions as a workaround for buggy
	 * firmware that accesses them even when they shouldn't.
	 *
	 * See efi_{reserve,free}_boot_services().
	 */
	if (md->type == EFI_BOOT_SERVICES_CODE ||
	    md->type == EFI_BOOT_SERVICES_DATA)
		return true;

	return false;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;

	desc_size = efi.memmap.desc_size;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;

		if (!should_map_region(md))
			continue;

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * desc_size), md, desc_size);

		left -= desc_size;
		(*count)++;
	}

	return new_memmap;
}

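/*
 * kexec path: the virtual addresses were already chosen by the first
 * kernel and passed in via setup_data, so map every region at those
 * fixed addresses instead of calling SetVirtualAddressMap() again.
 */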
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;

	efi.systab = NULL;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI. With efi=old_map, we don't do runtime services in
	 * the kexec kernel because during the initial boot something else
	 * might have been mapped at these virtual addresses.
	 */
	if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
		efi_memmap_unmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map EFI regions which were passed via setup_data. The virt_addr is a
	 * fixed address which was used in the first kernel of a kexec boot.
	 */
	for_each_efi_memory_desc(md) {
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map.
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(efi.memmap.phys_map,
				 efi.memmap.desc_size * efi.memmap.nr_map)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	BUG_ON(!efi.systab);

	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;

	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();

	efi.set_virtual_address_map = NULL;

	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
		runtime_code_page_mkexec();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The old method, which updated each memory descriptor with the
 * virtual address obtained from ioremap(), is still supported when the
 * kernel is booted with efi=old_map on its command line. That method
 * also lets the runtime services be called without having to thunk
 * back into physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. To pass function arguments we copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * For a kexec boot specifically, the EFI runtime mappings of the
 * previous kernel are passed in via setup_data. In that case the runtime
 * ranges are mapped to the same virtual addresses as in the first
 * kernel, see kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;
	unsigned long pa;

	efi.systab = NULL;

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	pa = __pa(new_memmap);

	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map that we are about to pass to the
	 * firmware via SetVirtualAddressMap().
	 */
	efi_memmap_unmap();

	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_enabled(EFI_DBG)) {
		pr_info("EFI runtime memory map:\n");
		efi_print_memmap();
	}

	BUG_ON(!efi.systab);

	if (efi_setup_page_tables(pa, 1 << pg_shift)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	if (efi_is_native()) {
		status = phys_efi_set_virtual_address_map(
				efi.memmap.desc_size * count,
				efi.memmap.desc_size,
				efi.memmap.desc_version,
				(efi_memory_desc_t *)pa);
	} else {
		status = efi_thunk_set_virtual_address_map(
				efi_phys.set_virtual_address_map,
				efi.memmap.desc_size * count,
				efi.memmap.desc_size,
				efi.memmap.desc_version,
				(efi_memory_desc_t *)pa);
	}

	if (status != EFI_SUCCESS) {
		pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
			 status);
		panic("EFI call to SetVirtualAddressMap() failed!");
	}

	efi_free_boot_services();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (efi_is_native())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	efi.set_virtual_address_map = NULL;

	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
}

void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();

	efi_dump_pagetable();
}

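/* Parse the x86-specific "efi=" options; only "old_map" is handled here. */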
static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "old_map"))
		set_bit(EFI_OLD_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);

bool efi_is_table_address(unsigned long phys_addr)
{
	unsigned int i;

	if (phys_addr == EFI_INVALID_TABLE_ADDR)
		return false;

	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
		if (*(efi_tables[i]) == phys_addr)
			return true;

	return false;
}