/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * SGI UV1 machines are known to be incompatible with this scheme, so we
 * provide an opt-out for these machines via a DMI quirk that sets the
 * attribute below.
 */
#define EFI_UV1_MEMMAP         EFI_ARCH_1

static inline bool efi_have_uv1_memmap(void)
{
	return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
}
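
/*
 * For reference, and purely as arithmetic on the layout described above
 * (no constant in this header encodes it): -4G is 0xffffffff00000000, so
 * the 64G window of stable runtime mappings spans
 *
 *	0xffffffef00000000 ... 0xffffffff00000000
 *
 * and is handed out top-down as regions get mapped.
 */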

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)	\
	__take_second_arg(n,					\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
#define __efi_arg_sentinel(n) , n
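
/*
 * Illustrative expansion (not part of the interface): with three arguments,
 *
 *	__efi_nargs(a, b, c)
 *
 * appends the eight sentinels to the argument list so that
 * __efi_arg_sentinel(3) lands in the 'n' slot of __efi_nargs__(), and the
 * whole expression evaluates to 3. With eight or more arguments a real
 * argument lands in that slot instead, and the BUILD_BUG_ON_MSG() branch
 * is evaluated at build time.
 */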

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})
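
/*
 * For example, efi_call() below passes its own argument list through
 *
 *	__efi_nargs_check(efi_call, 7, __VA_ARGS__)
 *
 * so a call with an eighth argument fails the build instead of reaching an
 * assembly thunk that only marshals seven.
 */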

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})


#define arch_efi_call_virt(p, f, args...)	p->f(args)

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

/*
 * struct efi_scratch - Scratch space used while switching to/from efi_mm
 * @phys_stack: stack used during EFI Mixed Mode
 * @prev_mm:    store/restore stolen mm_struct while switching to/from efi_mm
 */
struct efi_scratch {
	u64			phys_stack;
	struct mm_struct	*prev_mm;
} __packed;

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(&efi_mm);					\
})

#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)

#define arch_efi_call_virt_teardown()					\
({									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(efi_scratch.prev_mm);			\
									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset, which is present only in the
 * kernel binary.  Since the EFI stub is linked into a separate binary, it
 * doesn't have __memset(), so we must use the standard memset from
 * arch/x86/boot/compressed/string.c.  The same applies to memcpy and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);

struct efi_setup_data {
	u64 fw_vendor;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({						\
	__efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__);					\
})

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map);

/* arch specific definitions used by the stub code */

__attribute_const__ bool efi_is_64bit(void);

static inline bool efi_is_native(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return true;
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return true;
	return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))
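
/*
 * Typical stub-side use (see efi_bs_call() below):
 *
 *	efi_table_attr(efi_system_table(), boottime)
 *
 * reads the boottime member directly on native firmware, and widens the
 * 32-bit mixed_mode.boottime value into a proper pointer when a 64-bit
 * kernel runs on 32-bit firmware.
 */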

/*
 * The following macros translate arguments from native to mixed mode where
 * necessary. They are used to initialize the upper 32 bits of output
 * parameters, and to split a 64-bit argument into two 32-bit halves when the
 * 32-bit method expects it that way, so the call can be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 * 	free_pages(addr, size)
 * must be translated to
 * 	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */

static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__
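
/*
 * Hypothetical example (no such mapping exists in this file): a method that
 * returns a handle through an output pointer would be wired up as
 *
 *	#define __efi64_argmap_some_method(arg, handle)		\
 *		((arg), efi64_zero_upper(handle))
 *
 * after which every thunked call site of that method picks up the
 * translation automatically via __efi64_thunk_map().
 */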

/* The three macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->boottime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
				boottime), func, __VA_ARGS__))

#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->runtime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
				runtime), func, __VA_ARGS__))
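
/*
 * Example stub-side call, assuming the standard AllocatePool signature from
 * the UEFI specification:
 *
 *	void *buf;
 *	efi_status_t status;
 *
 *	status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *
 * On native firmware this calls boottime->allocate_pool() directly; in mixed
 * mode it goes through efi64_thunk() with __efi64_argmap_allocate_pool()
 * zero-initializing the upper half of the returned pointer.
 */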

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_find_mirror(void);
extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_find_mirror(void)
{
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#endif /* _ASM_X86_EFI_H */