/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>

extern unsigned long efi_fw_vendor, efi_config_table;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * SGI UV1 machines are known to be incompatible with this scheme, so we
 * provide an opt-out for these machines via a DMI quirk that sets the
 * attribute below.
 */
#define EFI_UV1_MEMMAP         EFI_ARCH_1

static inline bool efi_have_uv1_memmap(void)
{
	return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
}

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)	\
	__take_second_arg(n,					\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})
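
/*
 * Illustrative expansion (a sketch for readers; not referenced by any code):
 *
 *	__efi_nargs(a, b, c)
 *
 * binds "__efi_arg_sentinel(3)" to the 'n' slot of __efi_nargs__(), which
 * expands to ", 3", so __take_second_arg() yields 3.  With more than seven
 * arguments a real argument lands in the 'n' slot instead and the
 * BUILD_BUG_ON_MSG() branch is selected.  Similarly,
 *
 *	__efi_nargs_check(efi64_thunk, 6, a, b, c, d, e, f, g)
 *
 * fails the build because seven arguments exceed the stated limit of six.
 */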

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})


#define arch_efi_call_virt(p, f, args...)	p->f(args)
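
/*
 * Pairing sketch (modelled on the generic efi_call_virt_pointer() helper;
 * simplified and for illustration only):
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(p, f, args);
 *	arch_efi_call_virt_teardown();
 *
 * Every runtime service call thus runs with the FPU usable and branch
 * speculation restricted; the 64-bit variants below additionally switch
 * to efi_mm around the call.
 */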

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

/*
 * struct efi_scratch - Scratch space used while switching to/from efi_mm
 * @phys_stack: stack used during EFI Mixed Mode
 * @prev_mm:    store/restore stolen mm_struct while switching to/from efi_mm
 */
struct efi_scratch {
	u64			phys_stack;
	struct mm_struct	*prev_mm;
} __packed;

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(&efi_mm);					\
})

#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)					\

#define arch_efi_call_virt_teardown()					\
({									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(efi_scratch.prev_mm);			\
									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is
 * present only in the kernel binary.  Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(), so we should use the standard
 * memset from arch/x86/boot/compressed/string.c.  The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);

struct efi_setup_data {
	u64 fw_vendor;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({						\
	__efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__);					\
})

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map);

/* arch specific definitions used by the stub code */

__attribute_const__ bool efi_is_64bit(void);

static inline bool efi_is_native(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return true;
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return true;
	return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))
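
/*
 * Example (illustrative only): under 32-bit firmware on a 64-bit kernel,
 *
 *	efi_table_attr(efi_system_table(), boottime)
 *
 * reads the u32 'boottime' member from the mixed_mode view of the system
 * table, widens it via efi_mixed_mode_cast() and casts the result back to
 * the native pointer type of the field.
 */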

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. The use cases for this are to initialize the upper 32 bits of
 * output parameters, and to split 64-bit arguments that the 32-bit method
 * takes into two 32-bit halves so they can be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 * 	free_pages(addr, size)
 * must be translated to
 * 	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */

static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__
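
/*
 * Selection sketch (illustrative only; 'bt' and 'buf' stand for arbitrary
 * locals): when __efi64_argmap_<func> is defined, the 'mapped' token list
 * starts with a parenthesis, __efi_eat() swallows it, __efi_nargs() yields 0
 * and __efi64_argmap__0() emits the translated arguments; otherwise
 * __efi_nargs() yields 1 and __efi64_argmap__1() passes the original
 * arguments through.  For example,
 *
 *	__efi64_thunk_map(bt, allocate_pool, EFI_LOADER_DATA, size, &buf)
 *
 * becomes
 *
 *	efi64_thunk(bt->mixed_mode.allocate_pool,
 *		    EFI_LOADER_DATA, size, efi64_zero_upper(&buf));
 */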

/* The three macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->boottime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
				boottime), func, __VA_ARGS__))

#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->runtime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
				runtime), func, __VA_ARGS__))
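
/*
 * Typical stub usage (a sketch; the local names are made up):
 *
 *	void *buf;
 *	efi_status_t status;
 *
 *	status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *	if (status != EFI_SUCCESS)
 *		return status;
 *
 * On native firmware this is a direct call through the boot services table;
 * under 32-bit firmware on a 64-bit kernel it is routed through efi64_thunk()
 * using the argument mappings defined above.
 */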

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_find_mirror(void);
extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline  bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_find_mirror(void)
{
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#endif /* _ASM_X86_EFI_H */